diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 4f1cddb1688..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: rust -# cache dependencies: https://docs.travis-ci.com/user/caching/#Rust-Cargo-cache -cache: - directories: - - ~/.cargo -# rustdoc is slow on 1.34.0 and 1.35.0 (beta): use older version for docs -rust: - - 1.33.0 -os: - - linux -git: - depth: 5 -script: - - | - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_RUST_VERSION" == "1.33.0" && "$JOB" == "api_docs" ]]; then - echo "Running cargo docs on stable Rust on Linux" && - RUST_VERSION=1.33.0 make docs - fi -after_success: - # upload the documentation from the build if it's from Rust stable, Linux and not a pull request: - - | - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_RUST_VERSION" == "1.33.0" && "$TRAVIS_PULL_REQUEST" == false && "$JOB" == "api_docs" ]]; then - echo '' > target/doc/index.html \ - && mkdir target/doc/rusoto/ && echo '' > target/doc/rusoto/index.html \ - && sudo pip install ghp-import && ghp-import -n target/doc \ - && git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages \ - && cd .. - fi -env: - global: - - RUST_BACKTRACE=1 - - CARGO_INCREMENTAL=0 - matrix: - - JOB=api_docs -branches: - only: - - master - - auto -notifications: - email: false -matrix: - fast_finish: true diff --git a/.travis/split_workspace b/.travis/split_workspace deleted file mode 100755 index b3e5d6e689e..00000000000 --- a/.travis/split_workspace +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/python -"""Split members of workspace into evenly sized chunks for testing - -Example splitting members into three chunks: - -# Test first of three chunks -$ ./split_workspace 1 3 # rewrites Cargo.toml -$ cargo test - -# Reset Cargo.toml and test second of three chunks -$ git checkout Cargo.toml -$ ./split_workspace 2 3 -$ cargo test - -# Reset Cargo.toml and test third of three chunks -$ git checkout Cargo.toml -$ ./split_workspace 3 3 -$ cargo test -""" - -import sys -import toml - - -def get_chunk(members, part, total_parts): - chunk_size = int(len(members) / total_parts) - end = None if part == total_parts - 1 else chunk_size*(part+1) - return members[chunk_size*part:end] - - -def main(): - if len(sys.argv) != 3: - raise AssertionError('Expected exactly two arguments') - part, total_parts = int(sys.argv[1]) - 1, int(sys.argv[2]) - content = toml.load('Cargo.toml') - members_of_part = get_chunk(content['workspace']['members'], part, total_parts) - content['workspace']['members'] = members_of_part - with open('Cargo.toml', 'w') as f: - toml.dump(content, f) - - -if __name__ == '__main__': - main() diff --git a/AWS-CREDENTIALS.md b/AWS-CREDENTIALS.md index 7480cbdb60c..5a46f816db8 100644 --- a/AWS-CREDENTIALS.md +++ b/AWS-CREDENTIALS.md @@ -1,20 +1,26 @@ ### Credentials -Rusoto has the ability to source AWS access credentials in a few different ways: +#### Best Practices -1. Environment variables via `rusoto_core::EnvironmentProvider` (`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`) -2. AWS credentials file via `rusoto_core::ProfileProvider` -3. IAM ECS container profile via `rusoto_core::ContainerProvider` -4. IAM EC2 instance profile via `rusoto_core::InstanceMetadataProvider` +Please follow the [AWS documentation on best practices](https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html) for managing credentials for your account. These credentials provide anyone who has access to them access to your AWS account. 
You can mitigate the risk that comes with that access by following these best practices.

-There is also `rusoto_core::ChainProvider`, which is a convenience for attempting to source access credentials using the methods above in order.
+#### Usage
+
+Much like the [standard AWS toolchain](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html), Rusoto can source AWS access credentials from multiple locations, either independently or in a tiered fashion.
+
+1. Environment variables via [`rusoto_core::EnvironmentProvider`](https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.EnvironmentProvider.html) (`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`)
+2. AWS credentials file via [`rusoto_core::ProfileProvider`](https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.ProfileProvider.html)
+3. IAM ECS container profile via [`rusoto_core::ContainerProvider`](https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.ContainerProvider.html)
+4. IAM EC2 instance profile via [`rusoto_core::InstanceMetadataProvider`](https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.InstanceMetadataProvider.html)
+
+The [`rusoto_core::ChainProvider`](https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.ChainProvider.html) is a convenience for attempting to source access credentials using all the methods above in that order. If credentials cannot be obtained through one method, it falls back to the next. If all possibilities are exhausted, an error is returned.

`ProfileProvider` (and `ChainProvider`) also allow you to specify a custom path to the credentials file and the name of the profile to use. If not explicitly provided as arguments, the values for these two parameters are computed according to the following rules:

-* **location of credentials file:** if set and not empty the value of the environment variable ```AWS_SHARED_CREDENTIALS_FILE``` otherwise ```"~/.aws/credentials"```.
-* **profile name:** if set and not empty the value of the environment variable ```AWS_PROFILE``` otherwise ```"default"```
+* **location of credentials file:** if set and not empty, the value of the environment variable `AWS_SHARED_CREDENTIALS_FILE`; otherwise `"~/.aws/credentials"`.
+* **profile name:** if set and not empty, the value of the environment variable `AWS_PROFILE`; otherwise `"default"`

It's also possible to implement your own credentials sourcing mechanism by creating a type that implements `rusoto_core::ProvideAwsCredentials`.
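A minimal sketch of that extension point, assuming the `ProvideAwsCredentials` trait shape of the rusoto 0.4x / futures 0.1 era (the type name and placeholder keys below are illustrative, not part of Rusoto):

```rust,no_run
use futures::future::{self, FutureResult};
use rusoto_core::credential::{AwsCredentials, CredentialsError, ProvideAwsCredentials};

// A provider that always hands back the same fixed credentials.
struct HardcodedCredentialsProvider;

impl ProvideAwsCredentials for HardcodedCredentialsProvider {
    type Future = FutureResult<AwsCredentials, CredentialsError>;

    fn credentials(&self) -> Self::Future {
        // A real provider would fetch these from somewhere safe rather than
        // hard-coding them; the values here are placeholders.
        future::ok(AwsCredentials::new(
            "AKIA-PLACEHOLDER",
            "secret-key-placeholder",
            None, // no session token
            None, // no expiry
        ))
    }
}
```

Any service client can then be built against such a provider via its `new_with` constructor.

@@ -23,11 +29,6 @@ It's also possible to implement your own credentials sourcing mechanism by creat

If your aws account belongs to an organization and you need to use sts:AssumeRole, you're probably looking for `rusoto_sts::StsAssumeRoleSessionCredentialsProvider`.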
A simple program that uses sts:AssumeRole looks like this:

```rust,no_run
-extern crate env_logger;
-extern crate rusoto_core;
-extern crate rusoto_ec2;
-extern crate rusoto_sts;
-
use std::default::Default;

use rusoto_core::{Region, HttpClient};
@@ -36,7 +37,7 @@ use rusoto_ec2::{Ec2Client, Ec2, DescribeSpotInstanceRequestsRequest};
use rusoto_sts::{StsClient, StsAssumeRoleSessionCredentialsProvider};

fn main() {
-    let _ = env_logger::try_init();
+    env_logger::init();

    let sts = StsClient::new(Region::EuWest1);
@@ -56,11 +57,12 @@ fn main() {
}
```

-### Important note about using the StsAssumeRoleSessionCredentialsProvider in the recommended way
-**Be careful** that the current behavior of `rusoto_sts::StsAssumeRoleSessionCredentialsProvider` needs to be used with `rusoto_credential::AutoRefreshingProvider` as a wrapper to get advantage of using the already cached token of AssumeRole as it lives by default for 1 hour.
-Current implementation is not using the cached token returned by the AssumeRole by default so it will be refreshed with every call to AWS resource.
+### Important note about using the StsAssumeRoleSessionCredentialsProvider

-This will affect the performance as well as the billing of AWS.
+**Be careful**: the current `rusoto_sts::StsAssumeRoleSessionCredentialsProvider` should be wrapped in `rusoto_credential::AutoRefreshingProvider` to take advantage of the cached AssumeRole token, which is only valid for one hour by default.
+Without the wrapper, the cached token returned by AssumeRole is not reused, so a new token is requested on every call to an AWS resource.
+
+This will affect performance as well as the size of your AWS bill.

- https://docs.rs/rusoto_credential
- https://crates.io/crates/rusoto_credential

@@ -75,21 +77,11 @@ let provider = StsAssumeRoleSessionCredentialsProvider::new(

let auto_refreshing_provider = rusoto_credential::AutoRefreshingProvider::new(provider);
```

-#### Credential refreshing
-
-Credentials obtained from environment variables and credential files expire ten minutes after being acquired and are refreshed on subsequent calls to `credentials()` (a method from the `ProvideAwsCredentials` trait).
-
-IAM instance profile credentials are refreshed as needed.
-Upon calling `credentials()` it will see if they are expired or not.
-If expired, it attempts to get new credentials from the metadata service.
-If that fails it will return an error.
-IAM credentials expiration time comes from the IAM metadata response.
-
#### Local integration testing of IAM credentials

Edit the relevant `address`/IP locations in [credential/src/container.rs](credential/src/container.rs) and [credential/src/instance_metadata.rs](credential/src/instance_metadata.rs).

For local testing, you can use [moe](https://github.com/matthewkmayer/moe) and set the string to this:

```rust,ignore
-let mut address: String = "http://localhost:8080/latest/meta-data/iam/security-credentials".to_owned();
+let address: String = "http://localhost:8080/latest/meta-data/iam/security-credentials".to_owned();
```

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 023c1809568..299cfe32bc3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,42 @@
## [Unreleased] (Please put changes here)

+- Use static initializer for AWS profile regex
+- Add QLDB service
+- Add QLDB Session service
+- Update Skeptic tests for Rusoto v0.41
+- Don't decode query string parameters before encoding them. This fixes the prefix and marker
+params for s3 `list_objects` methods
+- Add Textract service
+- Update CloudDirectory API definition to `2017-01-11`
+- Add SecurityHub service
+- Add Transfer service
+- Introduce `rusoto_signature`, a standalone crate for signing HTTP requests.
+- Make static credentials into a credential provider
+- Add anonymous credentials support
+- Don't trim whitespace when parsing xml payloads. Fixes truncation of items with spaces in payloads
+such as an S3 key returned in `list_objects_v2`
+
+## [0.41.0] - 2019-10-07
+
+- Add `HttpClient::from_builder`
+- Upgrade to botocore from `1.12.163` to `1.12.230`
+- The types `rusoto_events::{CloudWatchEvents,CloudWatchEventsClient}` were renamed to `rusoto_events::{EventBridge,EventBridgeClient}`
+- Deserialize PostTextResponse correctly by allowing null values in the slots field
+- Fix Profile Config Loading: should ignore comments with '=' chars
+- Add App Mesh service
+- Fix service_crategen to parse operations with multiple static params
+- Refactor S3 integration tests - roughly one `#[test]` per behavior
+- Add support for non-signing clients
+- Add EC2 Instance Connect service
+- Allow deserialization of regions without an endpoint specified
+- Add ApNortheast3 region
+- Add MeSouth1 region
+- Add x-amz-content-sha256 header to signed and canonical headers
+- Added `Eq` and `Hash` implementations on `Region`
+- Fixed parsing of Athena error messages
+- Fix credential_process behavior when using the non-default profile
+- Correctly read session tokens from credential_process

## [0.40.0] - 2019-06-28

diff --git a/Cargo.toml b/Cargo.toml
index 57ade6fc85a..cc6ffa85c71 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,10 @@
[workspace]
members = [
    "mock",
+    "rusoto/credential_service_mock",
    "rusoto/core",
    "rusoto/credential",
+    "rusoto/signature",
    "rusoto/services/acm",
    "rusoto/services/acm-pca",
    "rusoto/services/alexaforbusiness",
@@ -11,6 +13,7 @@ members = [
    "rusoto/services/apigatewaymanagementapi",
    "rusoto/services/apigatewayv2",
    "rusoto/services/application-autoscaling",
+    "rusoto/services/appmesh",
    "rusoto/services/appstream",
    "rusoto/services/appsync",
    "rusoto/services/athena",
@@ -54,6 +57,7 @@ members = [
    "rusoto/services/dynamodb",
    "rusoto/services/dynamodbstreams",
    "rusoto/services/ec2",
+    "rusoto/services/ec2-instance-connect",
    "rusoto/services/ecr",
    "rusoto/services/ecs",
    "rusoto/services/elasticache",
@@ -117,6 +121,8 @@ members = [
    "rusoto/services/pi",
    "rusoto/services/polly",
    "rusoto/services/pricing",
+    "rusoto/services/qldb",
+    "rusoto/services/qldb-session",
    "rusoto/services/ram",
    "rusoto/services/rds",
    "rusoto/services/rds-data",
@@ -131,6 +137,7 @@ members = [
    "rusoto/services/sagemaker-runtime",
    "rusoto/services/sdb",
    "rusoto/services/secretsmanager",
+    "rusoto/services/securityhub",
    "rusoto/services/serverlessrepo",
    "rusoto/services/servicecatalog",
    "rusoto/services/servicediscovery",
@@ -146,7 +153,9 @@ members = [
    "rusoto/services/sts",
    "rusoto/services/support",
    "rusoto/services/swf",
+    "rusoto/services/textract",
    "rusoto/services/transcribe",
+    "rusoto/services/transfer",
    "rusoto/services/translate",
    "rusoto/services/waf",
    "rusoto/services/waf-regional",
@@ -167,4 +176,4 @@ exclude = [
[profile.bench]
opt-level = 3
debug = false
-debug-assertions = false
\ No newline at end of file
+debug-assertions = false

diff --git a/Makefile b/Makefile
index a87cacf2ed2..6c7b634c66f 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,11 @@ docs:
unit_test:
	cargo +$$RUST_VERSION test --all

+# Doctests can be very slow to compile and run. This option lets us skip those if needed.
+.PHONY: unit_test_no_doctests
+unit_test_no_doctests:
+	cargo +$$RUST_VERSION test --all --lib
+
.PHONY: skeptical
skeptical:
	(cd skeptical && cargo +$$RUST_VERSION test)
@@ -41,7 +46,8 @@ check_integration_test:

.PHONY: rustls_unit_test
rustls_unit_test:
-	cargo +$$RUST_VERSION test --all -v --no-default-features --features=rustls
+	(cd rusoto/core && cargo +$$RUST_VERSION test --no-default-features --features=rustls)
+	(cd rusoto/services && ./rustls-unit-test.sh $$RUST_VERSION)

.PHONY: check_service_defintions
check_service_defintions:
@@ -54,3 +60,7 @@ time_credentials:
.PHONY: bench_s3
bench_s3:
	(cd rusoto/services/s3 && cargo +nightly bench)
+
+.PHONY: credential_integration_test
+credential_integration_test:
+	(cd rusoto/credential_service_mock && ./run-and-test.sh)

diff --git a/README.md b/README.md
index 5ff45ef2ff7..30cbff49dbd 100644
--- a/README.md
+++ b/README.md
@@ -43,9 +43,9 @@ For example, to include only S3 and SQS:

```toml
[dependencies]
-rusoto_core = "0.40.0"
-rusoto_sqs = "0.40.0"
-rusoto_s3 = "0.40.0"
+rusoto_core = "0.41.0"
+rusoto_sqs = "0.41.0"
+rusoto_s3 = "0.41.0"
```

## Migration notes

Information on release schedules and procedures is in [RELEASING](RELEASING.md)

See [CONTRIBUTING](CONTRIBUTING.md).

-## Supported OSs and Rust versions
+## Supported OSs, Rust versions and non-AWS projects

Linux, OSX and Windows are supported and tested via Azure Pipelines and Appveyor. Rust stable, beta and nightly are supported.

+Rusoto's primary aim is to be used with AWS. Other projects that provide AWS-like APIs, such as Ceph, Minio, and Yandex Object Storage, are not a focus at this time. PRs to fix issues with Rusoto and AWS-like APIs are welcome, but generally won't be created by Rusoto maintainers.
+
## License

Rusoto is distributed under the terms of the MIT license.

diff --git a/RELEASING.md b/RELEASING.md
index d8dbb5ba5ac..108676ec9fe 100644
--- a/RELEASING.md
+++ b/RELEASING.md
@@ -27,18 +27,20 @@ Rusoto uses [semantic versioning 2.0.0](http://semver.org/).
3. Publish new version of `rusoto_credential` if changes have been made to it.
4. Publish new version of `rusoto_core` if changes have been made to it.
5. Publish new version of `rusoto_mock` if changes have been made to it.
-6. Run `publish-services.sh` in the `rusoto/services` dir. *Warning*: takes >2 hours on a low end Macbook. The script can be run again if an issue comes up without problems - crates.io prevents republishing.
+6. Run `publish-services.sh` in the `rusoto/services` dir. *Warning*: takes >4 hours on a low-end MacBook. The script can safely be run again if an issue comes up - crates.io prevents republishing.
7. Tag master branch with the new version. Example: `git tag -a rusoto-v0.21.0 -m "Rusoto 0.21.0 release."` then `git push --tags origin`.

### Git tags

-Due to multiple crates being in the repo, releases for each crate will be in the format `crate-vmajor.minor.patch`.
+Due to multiple crates being in the repo, releases for each crate will be in the format `crate-vmajor.minor.patch`. Rusoto core, service crates, credentials and `rusoto_mock` will all have the same version for a new release.

Examples:

-* `rusoto-v0.21.0`
-* `credentials-v0.3.0`
-* `mock-v0.27.0`
+* `rusoto-v0.41.0`
+* `credentials-v0.41.0`
+* `mock-v0.41.0`
+
+When bug fixes for a crate are published, all crates get a new release.
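As an illustration of that scheme, tagging a coordinated 0.41.0 release might look like the following. This is a sketch assembled from the tag formats and commands shown above, not a record of actual commands run:

```sh
# One tag per crate family, all carrying the same release version:
git tag -a rusoto-v0.41.0 -m "Rusoto 0.41.0 release."
git tag -a credentials-v0.41.0 -m "Rusoto credentials 0.41.0 release."
git tag -a mock-v0.41.0 -m "Rusoto mock 0.41.0 release."
git push --tags origin
```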
### Release notes

diff --git a/appveyor.yml b/appveyor.yml
index 8fb9bff2387..877a8131748 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -27,8 +27,9 @@ install:
build: off

test_script:
  - cargo update
-  - make unit_test
-  - make rustls_unit_test
+  - set RUST_VERSION=%CHANNEL%
+  - echo using rust_version %RUST_VERSION%
+  - make unit_test_no_doctests
  - make check_integration_test

branches:

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index ed89c4afad3..94057e743cd 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -17,10 +17,26 @@ jobs:
    displayName: 'Install Rust'
  - script: make unit_test
    displayName: 'Run unit tests'
-  - script: make rustls_unit_test
-    displayName: 'Run unit tests with rustls'
  - script: make check_integration_test
    displayName: 'Cargo check integration tests'
+  - script: make credential_integration_test
+    displayName: 'Credential integration tests'
+
+- job: 'rustls_unit_tests_linux'
+  displayName: 'Rustls unit tests on Linux'
+  pool:
+    vmImage: 'Ubuntu-16.04'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rust'
+  - script: make rustls_unit_test
+    displayName: 'Run unit tests with rustls'

- job: 'unit_and_integration_tests_linux_beta'
  displayName: 'Unit and integration tests on Linux (beta channel)'
  pool:
@@ -41,10 +57,26 @@
    displayName: 'Install Rust beta'
  - script: RUST_VERSION=beta make unit_test
    displayName: 'Run unit tests'
+
+- job: 'rustls_unit_tests_linux_beta'
+  displayName: 'Rustls unit tests on Linux (beta channel)'
+  pool:
+    vmImage: 'Ubuntu-16.04'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain beta-x86_64-unknown-linux-gnu -y
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rustup'
+  - script: |
+      rustup install beta
+      rustup default beta
+    displayName: 'Install Rust beta'
  - script: RUST_VERSION=beta make rustls_unit_test
    displayName: 'Run unit tests with rustls'
-  - script: RUST_VERSION=beta make check_integration_test
-    displayName: 'Cargo check integration tests'

- job: 'unit_and_integration_tests_linux_nightly'
  displayName: 'Unit and integration tests on Linux (nightly channel)'
  pool:
@@ -66,13 +98,31 @@
  - script: RUST_VERSION=nightly make unit_test
    displayName: 'Run unit tests'
    continueOnError: true
-  - script: RUST_VERSION=nightly make rustls_unit_test
-    displayName: 'Run unit tests with rustls'
-    continueOnError: true
  - script: RUST_VERSION=nightly make check_integration_test
    displayName: 'Cargo check integration tests'
    continueOnError: true

+- job: 'rustls_unit_tests_linux_nightly'
+  displayName: 'Rustls unit tests on Linux (nightly channel)'
+  pool:
+    vmImage: 'Ubuntu-16.04'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly-x86_64-unknown-linux-gnu -y
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rustup'
+  - script: |
+      rustup install nightly
+      rustup default nightly
+    displayName: 'Install Rust nightly'
+  - script: RUST_VERSION=nightly make rustls_unit_test
+    displayName: 'Run unit tests with rustls'
+    continueOnError: true
+
- job: 'unit_and_integration_tests_osx'
  displayName: 'Unit and integration tests on OSX'
  pool:
    vmImage: 'macos-10.13'

  steps:
  - checkout: self
    fetchDepth: 5
  - script: |
      set -e
      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
    displayName: 'Install Rust'
  - script: make unit_test
    displayName: 'Run unit tests'
-  - script: make rustls_unit_test
-    displayName: 'Run unit tests with rustls'
  - script: make check_integration_test
    displayName: 'Cargo check integration tests'

+- job: 'rustls_unit_tests_osx'
+  displayName: 'Rustls unit tests on OSX'
+  pool:
+    vmImage: 'macos-10.13'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rust'
+  - script: make rustls_unit_test
+    displayName: 'Run unit tests with rustls'
+
- job: 'unit_and_integration_tests_osx_beta'
  displayName: 'Unit and integration tests on OSX (beta channel)'
  pool:
@@ -108,11 +172,25 @@
    displayName: 'Install Rust'
  - script: RUST_VERSION=beta make unit_test
    displayName: 'Run unit tests'
-  - script: RUST_VERSION=beta make rustls_unit_test
-    displayName: 'Run unit tests with rustls'
  - script: RUST_VERSION=beta make check_integration_test
    displayName: 'Cargo check integration tests'

+- job: 'rustls_unit_tests_osx_beta'
+  displayName: 'Rustls unit tests on OSX (beta channel)'
+  pool:
+    vmImage: 'macos-10.13'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain beta
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rust'
+  - script: RUST_VERSION=beta make rustls_unit_test
+    displayName: 'Run unit tests with rustls'
+
- job: 'unit_and_integration_tests_osx_nightly'
  displayName: 'Unit and integration tests on OSX (nightly channel)'
  pool:
@@ -129,13 +207,27 @@
  - script: RUST_VERSION=nightly make unit_test
    displayName: 'Run unit tests'
    continueOnError: true
-  - script: RUST_VERSION=nightly make rustls_unit_test
-    displayName: 'Run unit tests with rustls'
-    continueOnError: true
  - script: RUST_VERSION=nightly make check_integration_test
    displayName: 'Cargo check integration tests'
    continueOnError: true

+- job: 'rustls_unit_tests_osx_nightly'
+  displayName: 'Rustls unit tests on OSX (nightly channel)'
+  pool:
+    vmImage: 'macos-10.13'
+
+  steps:
+  - checkout: self
+    fetchDepth: 5
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain nightly
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    displayName: 'Install Rust'
+  - script: RUST_VERSION=nightly make rustls_unit_test
+    displayName: 'Run unit tests with rustls'
+    continueOnError: true
+
- job: 'crate_gen_linux'
  displayName: 'Crate generation on Linux'
  pool:

diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml
index fe6952c9683..214391a86f2 100644
--- a/integration_tests/Cargo.toml
+++ b/integration_tests/Cargo.toml
@@ -22,6 +22,7 @@ debug = false

[dependencies.rusoto_core]
path = "../rusoto/core"
+default-features = false

[dependencies.rusoto_acm]
optional = true
@@ -43,6 +44,10 @@ path = "../rusoto/services/apigateway"
optional = true
path = "../rusoto/services/application-autoscaling"

+[dependencies.rusoto_appmesh]
+optional = true
+path = "../rusoto/services/appmesh"
+
[dependencies.rusoto_appstream]
optional = true
path = "../rusoto/services/appstream"
@@ -215,6 +220,10 @@ path = "../rusoto/services/dynamodbstreams"
optional = true
path = "../rusoto/services/ec2"

+[dependencies.rusoto_ec2_instance_connect]
+optional = true
+path = "../rusoto/services/ec2-instance-connect"
+
[dependencies.rusoto_ecr]
optional = true
path = "../rusoto/services/ecr"
@@ -511,6 +520,10 @@ path = "../rusoto/services/sdb" optional = true path = "../rusoto/services/secretsmanager" +[dependencies.rusoto_securityhub] +optional = true +path = "../rusoto/services/securityhub" + [dependencies.rusoto_serverlessrepo] optional = true path = "../rusoto/services/serverlessrepo" @@ -575,6 +588,10 @@ path = "../rusoto/services/swf" optional = true path = "../rusoto/services/transcribe" +[dependencies.rusoto_transfer] +optional = true +path = "../rusoto/services/transfer" + [dependencies.rusoto_translate] optional = true path = "../rusoto/services/translate" @@ -619,172 +636,181 @@ path = "../rusoto/services/apigatewaymanagementapi" optional = true path = "../rusoto/services/apigatewayv2" +[dependencies.rusoto_qldb] +optional = true +path = "../rusoto/services/qldb" + [dependencies.rusoto_ram] optional = true path = "../rusoto/services/ram" [dev-dependencies] -env_logger = "0.5" +env_logger = "0.7" futures = "0.1.16" futures-fs = "0.0.3" log = "0.4.1" -rand = "^0.3.14" +rand = "^0.7.2" time = "0.1.37" reqwest = "0.9" http = "0.1.17" [features] all = [ - "core", - "acm", - "acm-pca", - "alexaforbusiness", - "apigateway", - "application-autoscaling", - "appstream", - "appsync", - "athena", - "autoscaling", - "autoscaling-plans", - "batch", - "budgets", - "ce", - "cloud9", - "chime", - "clouddirectory", - "cloudformation", - "cloudfront", - "cloudhsm", - "cloudhsmv2", - "cloudsearch", - "cloudsearchdomain", - "cloudtrail", - "cloudwatch", - "codebuild", - "codecommit", - "codedeploy", - "codepipeline", - "codestar", - "cognito-identity", - "cognito-idp", - "cognito-sync", - "comprehend", - "comprehendmedical", - "config", - "connect", - "cur", - "datapipeline", - "dax", - "devicefarm", - "directconnect", - "discovery", - "dms", - "docdb", - "ds", - "dynamodb", - "dynamodbstreams", - "ec2", - "ecr", - "ecs", - "elasticache", - "elasticbeanstalk", - "efs", - "eks", - "elastictranscoder", - "elb", - "elbv2", - "emr", - "events", - "firehose", - "fms", - "fsx", - "gamelift", - "glacier", - "glue", - "greengrass", - "guardduty", - "iam", - "importexport", - "inspector", - "iot", - "iotanalytics", - "iotdata", - "iot1clickdevices", - "iot1clickprojects", - "kafka", - "kinesis", - "kinesisanalytics", - "kinesisvideo", - "kinesisvideomedia", - "kinesisvideoarchivedmedia", - "kms", - "lambda", - "lex-models", - "license-manager", - "lightsail", - "lex-runtime", - "logs", - "neptune", - "machinelearning", - "macie", - "marketplacecommerceanalytics", - "marketplace-entitlement", - "mediaconvert", - "medialive", - "mediapackage", - "mediastore", - "mediatailor", - "mgh", - "mobile", - "mq", - "mturk", - "opsworks", - "opsworkscm", - "organizations", - "pi", - "polly", - "pricing", - "redshift", - "rekognition", - "resource-groups", - "resourcegroupstaggingapi", - "rds", - "rds-data", - "route53", - "route53domains", - "s3", - "sagemaker", - "sagemaker-runtime", - "sdb", - "secretsmanager", - "serverlessrepo", - "servicecatalog", - "servicediscovery", - "ses", - "shield", - "sms", - "snowball", - "sns", - "sqs", - "ssm", - "stepfunctions", - "storagegateway", - "sts", - "support", - "swf", - "transcribe", - "translate", - "waf", - "waf-regional", - "workdocs", - "worklink", - "workmail", - "workspaces", - "xray", - "amplify", - "apigatewaymanagementapi", - "apigatewayv2", - "ram" + "core", + "acm", + "acm-pca", + "alexaforbusiness", + "apigateway", + "application-autoscaling", + "appmesh", + "appstream", + "appsync", + "athena", + "autoscaling", + "autoscaling-plans", + "batch", + 
"budgets", + "ce", + "cloud9", + "chime", + "clouddirectory", + "cloudformation", + "cloudfront", + "cloudhsm", + "cloudhsmv2", + "cloudsearch", + "cloudsearchdomain", + "cloudtrail", + "cloudwatch", + "codebuild", + "codecommit", + "codedeploy", + "codepipeline", + "codestar", + "cognito-identity", + "cognito-idp", + "cognito-sync", + "comprehend", + "comprehendmedical", + "config", + "connect", + "cur", + "datapipeline", + "dax", + "devicefarm", + "directconnect", + "discovery", + "dms", + "docdb", + "ds", + "dynamodb", + "dynamodbstreams", + "ec2", + "ec2-instance-connect", + "ecr", + "ecs", + "elasticache", + "elasticbeanstalk", + "efs", + "eks", + "elastictranscoder", + "elb", + "elbv2", + "emr", + "events", + "firehose", + "fms", + "fsx", + "gamelift", + "glacier", + "glue", + "greengrass", + "guardduty", + "iam", + "importexport", + "inspector", + "iot", + "iotanalytics", + "iotdata", + "iot1clickdevices", + "iot1clickprojects", + "kafka", + "kinesis", + "kinesisanalytics", + "kinesisvideo", + "kinesisvideomedia", + "kinesisvideoarchivedmedia", + "kms", + "lambda", + "lex-models", + "license-manager", + "lightsail", + "lex-runtime", + "logs", + "neptune", + "machinelearning", + "macie", + "marketplacecommerceanalytics", + "marketplace-entitlement", + "mediaconvert", + "medialive", + "mediapackage", + "mediastore", + "mediatailor", + "mgh", + "mobile", + "mq", + "mturk", + "opsworks", + "opsworkscm", + "organizations", + "pi", + "polly", + "pricing", + "redshift", + "rekognition", + "resource-groups", + "resourcegroupstaggingapi", + "rds", + "rds-data", + "route53", + "route53domains", + "s3", + "sagemaker", + "sagemaker-runtime", + "sdb", + "secretsmanager", + "securityhub", + "serverlessrepo", + "servicecatalog", + "servicediscovery", + "ses", + "shield", + "sms", + "snowball", + "sns", + "sqs", + "ssm", + "stepfunctions", + "storagegateway", + "sts", + "support", + "swf", + "transcribe", + "transfer", + "translate", + "waf", + "waf-regional", + "workdocs", + "worklink", + "workmail", + "workspaces", + "xray", + "amplify", + "apigatewaymanagementapi", + "apigatewayv2", + "ram", + "qldb" ] core = [] acm = ["rusoto_acm"] @@ -792,6 +818,7 @@ acm-pca = ["rusoto_acm_pca"] alexaforbusiness = ["rusoto_alexaforbusiness"] apigateway = ["rusoto_apigateway"] application-autoscaling = ["rusoto_application_autoscaling"] +appmesh = ["rusoto_appmesh"] appstream = ["rusoto_appstream"] appsync = ["rusoto_appsync"] athena = ["rusoto_athena"] @@ -835,6 +862,7 @@ ds = ["rusoto_ds"] dynamodb = ["rusoto_dynamodb"] dynamodbstreams = ["rusoto_dynamodbstreams"] ec2 = ["rusoto_ec2"] +ec2-instance-connect = ["rusoto_ec2_instance_connect"] ecr = ["rusoto_ecr"] ecs = ["rusoto_ecs"] elasticache = ["rusoto_elasticache"] @@ -909,6 +937,7 @@ sagemaker = ["rusoto_sagemaker"] sagemaker-runtime = ["rusoto_sagemaker_runtime"] sdb = ["rusoto_sdb"] secretsmanager = ["rusoto_secretsmanager"] +securityhub = ["rusoto_securityhub"] serverlessrepo = ["rusoto_serverlessrepo"] servicecatalog = ["rusoto_servicecatalog"] servicediscovery = ["rusoto_servicediscovery"] @@ -925,6 +954,7 @@ sts = ["rusoto_sts", "rusoto_ec2"] support = ["rusoto_support"] swf = ["rusoto_swf"] transcribe = ["rusoto_transcribe"] +transfer = ["rusoto_transfer"] translate = ["rusoto_translate"] waf = ["rusoto_waf"] waf-regional = ["rusoto_waf_regional"] @@ -937,6 +967,10 @@ amplify = ["rusoto_amplify"] apigatewaymanagementapi = ["rusoto_apigatewaymanagementapi"] apigatewayv2 = ["rusoto_apigatewayv2"] ram = ["rusoto_ram"] +qldb = ["rusoto_qldb"] 
nightly-testing = ["rusoto_core/nightly-testing"] disable_ceph_unsupported = [] disable_minio_unsupported = [] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] \ No newline at end of file diff --git a/integration_tests/tests/appmesh.rs b/integration_tests/tests/appmesh.rs new file mode 100644 index 00000000000..a32c29e3331 --- /dev/null +++ b/integration_tests/tests/appmesh.rs @@ -0,0 +1,23 @@ +#![cfg(feature = "appmesh")] + +extern crate rusoto_appmesh; +extern crate rusoto_core; + +use rusoto_appmesh::{AppMesh, AppMeshClient, ListMeshesInput}; +use rusoto_core::Region; + +#[test] +fn main() { + let appmesh = AppMeshClient::new(Region::UsEast1); + + match appmesh.list_meshes(ListMeshesInput::default()).sync() { + Ok(response) => { + for mesh_ref in response.meshes { + println!("arn -> {:?}", mesh_ref.arn); + } + } + Err(err) => { + panic!("Error listing meshes {:#?}", err); + } + } +} diff --git a/integration_tests/tests/cognitoidentity.rs b/integration_tests/tests/cognitoidentity.rs index 03234eea11b..7fd7c3cf3fe 100644 --- a/integration_tests/tests/cognitoidentity.rs +++ b/integration_tests/tests/cognitoidentity.rs @@ -4,8 +4,7 @@ extern crate rusoto_cognito_identity; extern crate rusoto_core; use rusoto_cognito_identity::{ - CognitoIdentity, CognitoIdentityClient, ListIdentitiesInput, - ListIdentityPoolsInput, + CognitoIdentity, CognitoIdentityClient, ListIdentitiesInput, ListIdentityPoolsInput, }; use rusoto_core::{Region, RusotoError}; diff --git a/integration_tests/tests/core.rs b/integration_tests/tests/core.rs index 8235ac95ee6..038473595b9 100644 --- a/integration_tests/tests/core.rs +++ b/integration_tests/tests/core.rs @@ -4,12 +4,14 @@ extern crate futures; extern crate reqwest; extern crate rusoto_core; +use futures::future::ok; use futures::Future; +use rusoto_core::request::{HttpClient, HttpResponse}; use rusoto_core::credential::{DefaultCredentialsProvider, ProvideAwsCredentials}; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::signature::SignedRequest; -use rusoto_core::Region; +use rusoto_core::{Client, Region}; #[test] fn get_caller_identity_presigned() { @@ -36,3 +38,36 @@ fn get_caller_identity_presigned() { "presigned url should succeed when used" ); } + +#[test] +fn with_signature() { + let client = Client::shared(); + let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/"); + let mut params = Params::new(); + params.put("Action", "GetCallerIdentity"); + params.put("Version", "2011-06-15"); + request.set_params(params); + let response = client + .sign_and_dispatch::(request, |r| Box::new(ok(r))) + .sync(); + assert!(response.is_ok(), response.err()); + let response: HttpResponse = response.unwrap(); + assert!(response.status == 200, format!("Signed request should succeed with status code 200. 
Got status code: {:?}, headers {:?}", response.status, response.headers)); +} + +#[test] +fn without_signature() { + let client = + Client::new_not_signing(HttpClient::new().expect("failed to create request dispatcher")); + let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/"); + let mut params = Params::new(); + params.put("Action", "GetCallerIdentity"); + params.put("Version", "2011-06-15"); + request.set_params(params); + let response = client + .sign_and_dispatch::(request, |r| Box::new(ok(r))) + .sync(); + assert!(response.is_ok(), response.err()); + let response: HttpResponse = response.unwrap(); + assert!(response.status == 403, format!("Unsigned API request must fail with status request 403. Got status code: {:?}, headers {:?}", response.status, response.headers)); +} diff --git a/integration_tests/tests/discovery.rs b/integration_tests/tests/discovery.rs index b78c70424b0..ca8c1305e11 100644 --- a/integration_tests/tests/discovery.rs +++ b/integration_tests/tests/discovery.rs @@ -7,8 +7,7 @@ extern crate rusoto_discovery; use rusoto_core::{Region, RusotoError}; use rusoto_discovery::{ - DescribeTagsRequest, Discovery, DiscoveryClient, - ListConfigurationsRequest, + DescribeTagsRequest, Discovery, DiscoveryClient, ListConfigurationsRequest, }; use std::str; diff --git a/integration_tests/tests/docdb.rs b/integration_tests/tests/docdb.rs index 7c07f1230a6..eec07855e7f 100644 --- a/integration_tests/tests/docdb.rs +++ b/integration_tests/tests/docdb.rs @@ -4,9 +4,7 @@ extern crate rusoto_core; extern crate rusoto_docdb; use rusoto_core::Region; -use rusoto_docdb::{ - Docdb, DocdbClient, DescribeDBClustersMessage, -}; +use rusoto_docdb::{DescribeDBClustersMessage, Docdb, DocdbClient}; #[test] fn should_describe_tags() { diff --git a/integration_tests/tests/ec2-instance-connect.rs b/integration_tests/tests/ec2-instance-connect.rs new file mode 100644 index 00000000000..6f11feaf3aa --- /dev/null +++ b/integration_tests/tests/ec2-instance-connect.rs @@ -0,0 +1,38 @@ +#![cfg(feature = "ec2-instance-connect")] + +extern crate rusoto_core; +extern crate rusoto_ec2_instance_connect; + +use rusoto_core::{Region, RusotoError}; +use rusoto_ec2_instance_connect::{ + Ec2InstanceConnect, Ec2InstanceConnectClient, SendSSHPublicKeyError, SendSSHPublicKeyRequest, +}; + +#[test] +fn send_ssh_public_key_correctly_errors_for_unknown_instance() { + let client = Ec2InstanceConnectClient::new(Region::UsEast1); + let request = SendSSHPublicKeyRequest { + availability_zone: "us-east-1a".into(), + instance_id: "i-00000000".into(), + instance_os_user: "ec2-user".into(), + ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+P68X1LG5rJXQL1ktjhMv84lP8gKgJoPk99GwWoM6lbJAv80WUgauB961I5i/3/Y0XWnYZfzFCP3+fTu/9+vEsfTd38hUW3QBGTPrx/jXyvTBRQc7bTirpeicfwL9SwM4ztYvuM45sGSeZkQIg+TMKVFGnR0ijCitG613fRP/NUw/jQjzUPj2ymCw43MIAD1BPQrznsyoaPWP/bKv91Y9ZtB1fOn3UzgWlwBGxzPNXx8boquLfHWi+ut+v1zfZpUBUjQtI4EIctjqzmxnyB1SPpxk0r5v2GR0qLChKzZ0IqdJmImlz2vqCuwUThJN9d/iF//kCeb76uJVsDOOtDWb user@host".into(), + }; + + match client.send_ssh_public_key(request).sync() { + Ok(_) => { + panic!("send_ssh_public_key should fail"); + } + Err(error) => match error { + RusotoError::Service(e) => match e { + SendSSHPublicKeyError::InvalidArgs(error) => assert!( + error.contains("Instance not found"), + "Missing error message" + ), + _ => panic!("Unexpected error"), + }, + _ => { + panic!("Should have a typed error from EC2 Instance Connect"); + } + }, + }; +} diff --git a/integration_tests/tests/ec2.rs 
b/integration_tests/tests/ec2.rs
index 8ba3fdc1d5b..e814f6ba6fd 100644
--- a/integration_tests/tests/ec2.rs
+++ b/integration_tests/tests/ec2.rs
@@ -4,9 +4,7 @@ extern crate rusoto_core;
extern crate rusoto_ec2;

use rusoto_core::{Region, RusotoError};
-use rusoto_ec2::{
-    CreateSnapshotRequest, DescribeInstancesRequest, Ec2, Ec2Client,
-};
+use rusoto_ec2::{CreateSnapshotRequest, DescribeInstancesRequest, Ec2, Ec2Client};
use rusoto_ec2::{CreateTagsRequest, Tag};

use std::str;

diff --git a/integration_tests/tests/elastictranscoder.rs b/integration_tests/tests/elastictranscoder.rs
index be6953def2e..4c572be3a9c 100644
--- a/integration_tests/tests/elastictranscoder.rs
+++ b/integration_tests/tests/elastictranscoder.rs
@@ -11,6 +11,7 @@ extern crate rusoto_s3;
use std::clone::Clone;
use std::ops::{Deref, DerefMut};

+use rand::distributions::Alphanumeric;
use rand::Rng;
use rusoto_core::Region;
use rusoto_elastictranscoder::{Ets, EtsClient};
@@ -128,11 +129,12 @@ fn create_client() -> TestEtsClient {
/// ASCII characters to the specified prefix.
/// Keeps it lower case to work with S3 requirements as of 3/1/2018.
fn generate_unique_name(prefix: &str) -> String {
+    let mut rng = rand::thread_rng();
    format!(
        "{}-{}",
        prefix,
-        rand::thread_rng()
-            .gen_ascii_chars()
+        std::iter::repeat(())
+            .map(|()| rng.sample(Alphanumeric))
            .take(AWS_SERVICE_RANDOM_SUFFIX_LENGTH)
            .collect::<String>()
    )
@@ -339,7 +341,7 @@ fn list_presets() {
                .filter(|x| x.id == Some(AWS_ETS_WEB_PRESET_ID.to_owned()))
                .next();
            web_preset.unwrap().clone()
-        },
+        }
    };

    assert_eq!(found_preset.id, Some(AWS_ETS_WEB_PRESET_ID.to_owned()));

diff --git a/integration_tests/tests/events.rs b/integration_tests/tests/events.rs
index 777e1adca79..0914079fcce 100644
--- a/integration_tests/tests/events.rs
+++ b/integration_tests/tests/events.rs
@@ -4,11 +4,11 @@ extern crate rusoto_core;
extern crate rusoto_events;

use rusoto_core::Region;
-use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, ListRulesRequest};
+use rusoto_events::{EventBridge, EventBridgeClient, ListRulesRequest};

#[test]
fn should_list_rules() {
-    let client = CloudWatchEventsClient::new(Region::UsEast1);
+    let client = EventBridgeClient::new(Region::UsEast1);
    let request = ListRulesRequest::default();

    client.list_rules(request).sync().unwrap();

diff --git a/integration_tests/tests/fms.rs b/integration_tests/tests/fms.rs
index 6e2275abf39..0b5f9ec8af7 100644
--- a/integration_tests/tests/fms.rs
+++ b/integration_tests/tests/fms.rs
@@ -11,7 +11,10 @@ use std::str;
#[test]
fn should_list_policies() {
    let client = FmsClient::new(Region::UsEast1);
-    let request = ListPoliciesRequest::default();
+    let request = ListPoliciesRequest {
+        max_results: Some(1),
+        ..Default::default()
+    };

    // If our account doesn't have access, assume everything is fine:
    match client.list_policies(request).sync() {
@@ -22,7 +25,7 @@
                .contains("is not currently delegated by AWS FM"),
                "Missing error message"
            ),
-            _ => panic!("Should have a typed error from FMS"),
+            _ => panic!("Should have a typed error from FMS, got {:?}", e),
        },
        Ok(res) => println!("Got these policies: {:?}", res),
    }

diff --git a/integration_tests/tests/fsx.rs b/integration_tests/tests/fsx.rs
index d8b8f062c0f..0b645f2c695 100644
--- a/integration_tests/tests/fsx.rs
+++ b/integration_tests/tests/fsx.rs
@@ -4,7 +4,7 @@ extern crate rusoto_core;
extern crate rusoto_fsx;

use rusoto_core::Region;
-use rusoto_fsx::{Fsx, FsxClient, DescribeFileSystemsRequest};
+use rusoto_fsx::{DescribeFileSystemsRequest,
Fsx, FsxClient}; #[test] fn should_describe_filesystems() { diff --git a/integration_tests/tests/licensemanager.rs b/integration_tests/tests/licensemanager.rs index 9d954ffe82b..f4e1b7ecf84 100644 --- a/integration_tests/tests/licensemanager.rs +++ b/integration_tests/tests/licensemanager.rs @@ -18,9 +18,9 @@ fn should_list_domains() { match e { RusotoError::Service(err) => { assert!(format!("{:?}", err).contains("Denied")); - }, + } _ => (), }; - }, + } } } diff --git a/integration_tests/tests/machinelearning.rs b/integration_tests/tests/machinelearning.rs index 5b63f7970b2..4642c851cc9 100644 --- a/integration_tests/tests/machinelearning.rs +++ b/integration_tests/tests/machinelearning.rs @@ -19,7 +19,9 @@ fn should_describe_batch_predictions() { match client.describe_batch_predictions(request).sync() { Ok(_) => (), - Err(e) => assert!(e.to_string().contains("AmazonML is no longer available to new customers")), + Err(e) => assert!(e + .to_string() + .contains("AmazonML is no longer available to new customers")), }; } #[test] @@ -29,7 +31,9 @@ fn should_describe_data_sources() { match client.describe_data_sources(request).sync() { Ok(_) => (), - Err(e) => assert!(e.to_string().contains("AmazonML is no longer available to new customers")), + Err(e) => assert!(e + .to_string() + .contains("AmazonML is no longer available to new customers")), }; } #[test] @@ -39,6 +43,8 @@ fn should_describe_evaluations() { match client.describe_evaluations(request).sync() { Ok(_) => (), - Err(e) => assert!(e.to_string().contains("AmazonML is no longer available to new customers")), + Err(e) => assert!(e + .to_string() + .contains("AmazonML is no longer available to new customers")), }; } diff --git a/integration_tests/tests/qldb.rs b/integration_tests/tests/qldb.rs new file mode 100644 index 00000000000..a9b0dbbc931 --- /dev/null +++ b/integration_tests/tests/qldb.rs @@ -0,0 +1,17 @@ +#![cfg(feature = "qldb")] + +extern crate rusoto_core; +extern crate rusoto_qldb; + +use rusoto_core::Region; +use rusoto_qldb::{ListLedgersRequest, Qldb, QldbClient}; + +#[test] +fn should_list_ledgers() { + let client = QldbClient::new(Region::UsEast1); + let response = client + .list_ledgers(ListLedgersRequest::default()) + .sync() + .expect("expected an ok response"); + println!("response is {:#?}", response); +} diff --git a/integration_tests/tests/s3.rs b/integration_tests/tests/s3.rs index f9b036423b4..2805bc19481 100644 --- a/integration_tests/tests/s3.rs +++ b/integration_tests/tests/s3.rs @@ -2,9 +2,9 @@ extern crate env_logger; extern crate futures; extern crate futures_fs; +extern crate http; extern crate log; extern crate reqwest; -extern crate http; extern crate rusoto_core; extern crate rusoto_s3; extern crate time; @@ -19,125 +19,325 @@ use time::get_time; use futures::{Future, Stream}; use futures_fs::FsPool; -use rusoto_core::credential::{AwsCredentials, DefaultCredentialsProvider}; -use rusoto_core::{Region, ProvideAwsCredentials, RusotoError}; +use rusoto_core::credential::{AwsCredentials, DefaultCredentialsProvider, StaticProvider}; +use rusoto_core::{ProvideAwsCredentials, Region, RusotoError}; use rusoto_s3::util::{PreSignedRequest, PreSignedRequestOption}; use rusoto_s3::{ CORSConfiguration, CORSRule, CompleteMultipartUploadRequest, CompletedMultipartUpload, CompletedPart, CopyObjectRequest, CreateBucketRequest, CreateMultipartUploadRequest, - DeleteBucketRequest, DeleteObjectRequest, GetObjectError, GetObjectRequest, - HeadObjectRequest, ListObjectsRequest, ListObjectsV2Request, PutBucketCorsRequest, - 
PutObjectRequest, S3Client, StreamingBody, UploadPartCopyRequest, UploadPartRequest, S3, + DeleteBucketRequest, DeleteObjectRequest, GetObjectError, GetObjectRequest, HeadObjectRequest, + ListObjectsRequest, ListObjectsV2Request, PutBucketCorsRequest, PutObjectRequest, S3Client, + StreamingBody, UploadPartCopyRequest, UploadPartRequest, S3, }; -type TestClient = S3Client; +struct TestS3Client { + region: Region, + s3: S3Client, + bucket_name: String, + // This flag signifies whether this bucket was already deleted as part of a test + bucket_deleted: bool, +} -// Rust is in bad need of an integration test harness -// This creates the S3 resources needed for a suite of tests, -// executes those tests, and then destroys the resources -#[test] -fn test_all_the_things() { - let _ = env_logger::try_init(); +impl TestS3Client { + // construct S3 testing client + fn new(bucket_name: String) -> TestS3Client { + let region = if let Ok(endpoint) = env::var("S3_ENDPOINT") { + let region = Region::Custom { + name: "us-east-1".to_owned(), + endpoint: endpoint.to_owned(), + }; + println!( + "picked up non-standard endpoint {:?} from S3_ENDPOINT env. variable", + region + ); + region + } else { + Region::UsEast1 + }; + + TestS3Client { + region: region.to_owned(), + s3: S3Client::new(region), + bucket_name: bucket_name.to_owned(), + bucket_deleted: false, + } + } + + // construct an anonymous client for testing acls + fn create_anonymous_client(&self) -> S3Client { + if cfg!(feature = "disable_minio_unsupported") { + // Minio does not support setting acls, so to make tests pass, return a client that has + // the credentials of the bucket owner. + self.s3.clone() + } else { + S3Client::new_with( + rusoto_core::request::HttpClient::new().expect("Failed to creat HTTP client"), + StaticProvider::from(AwsCredentials::default()), + self.region.clone(), + ) + } + } - let region = if let Ok(endpoint) = env::var("S3_ENDPOINT") { - let region = Region::Custom { - name: "us-east-1".to_owned(), - endpoint: endpoint.to_owned(), + fn create_test_bucket(&self, name: String) { + let create_bucket_req = CreateBucketRequest { + bucket: name.clone(), + ..Default::default() }; - println!( - "picked up non-standard endpoint {:?} from S3_ENDPOINT env. 
variable", - region - ); - region - } else { - Region::UsEast1 + self.s3 + .create_bucket(create_bucket_req) + .sync() + .expect("Failed to create test bucket"); + } + + fn create_test_bucket_with_acl(&self, name: String, acl: Option) { + let create_bucket_req = CreateBucketRequest { + bucket: name.clone(), + acl, + ..Default::default() + }; + self.s3 + .create_bucket(create_bucket_req) + .sync() + .expect("Failed to create test bucket"); + } + + fn delete_object(&self, key: String) { + let delete_object_req = DeleteObjectRequest { + bucket: self.bucket_name.to_owned(), + key: key.to_owned(), + ..Default::default() + }; + + self.s3 + .delete_object(delete_object_req) + .sync() + .expect("Couldn't delete object"); + } + + fn put_test_object(&self, filename: String) { + let contents: Vec = Vec::new(); + let put_request = PutObjectRequest { + bucket: self.bucket_name.to_owned(), + key: filename.to_owned(), + body: Some(contents.into()), + ..Default::default() + }; + + self.s3 + .put_object(put_request) + .sync() + .expect("Failed to put test object"); + } +} + +impl Drop for TestS3Client { + fn drop(&mut self) { + if self.bucket_deleted { + return; + } + let delete_bucket_req = DeleteBucketRequest { + bucket: self.bucket_name.clone(), + ..Default::default() + }; + + match self.s3.delete_bucket(delete_bucket_req).sync() { + Ok(_) => println!("Deleted S3 bucket: {}", self.bucket_name), + Err(e) => println!("Failed to delete S3 bucket: {}", e), + } + } +} + +// inititializes logging +fn init_logging() { + let _ = env_logger::try_init(); +} + +#[test] +// creates a bucket and test listing buckets and items in bucket +fn test_bucket_creation_deletion() { + init_logging(); + + let bucket_name = format!("s3-test-bucket-{}", get_time().sec); + let mut test_client = TestS3Client::new(bucket_name.clone()); + + let create_bucket_req = CreateBucketRequest { + bucket: bucket_name.clone(), + ..Default::default() }; - let client = S3Client::new(region.clone()); - let credentials = DefaultCredentialsProvider::new() - .unwrap() - .credentials() - .wait() - .unwrap(); + // first create a bucket + let create_bucket_resp = test_client.s3.create_bucket(create_bucket_req).sync(); + assert!(create_bucket_resp.is_ok()); + println!( + "Bucket {} created, resp: {:#?}", + bucket_name.clone(), + create_bucket_resp.unwrap() + ); - let test_bucket = format!("rusoto-test-bucket-{}", get_time().sec); - let filename = format!("test_file_{}", get_time().sec); - let utf8_filename = format!("test[über]file@{}", get_time().sec); - let binary_filename = format!("test_file_b{}", get_time().sec); - let multipart_filename = format!("test_multipart_file_{}", get_time().sec); - let metadata_filename = format!("test_metadata_file_{}", get_time().sec); + // now lets check for our bucket and list items in the one we created + let resp = test_client.s3.list_buckets().sync(); + assert!(resp.is_ok()); - // get a list of list_buckets - test_list_buckets(&client); + let resp = resp.unwrap(); + let mut bucket_found = false; + for bucket in resp.buckets.unwrap().iter() { + if bucket.name == Some(bucket_name.clone()) { + bucket_found = true; + break; + } + } + assert!(bucket_found); - // create a bucket for these tests - test_create_bucket(&client, &test_bucket); + let list_obj_req = ListObjectsV2Request { + bucket: bucket_name.to_owned(), + start_after: Some("foo".to_owned()), + ..Default::default() + }; + let result = test_client.s3.list_objects_v2(list_obj_req).sync(); + assert!(result.is_ok()); - // list items v2 - 
list_items_in_bucket(&client, &test_bucket); + test_delete_bucket(&test_client.s3, &bucket_name); + test_client.bucket_deleted = true; +} - // do a multipart upload - test_multipart_upload(&client, ®ion, &credentials, &test_bucket, &multipart_filename); +#[test] +// test against normal files +fn test_puts_gets_deletes() { + init_logging(); + + let bucket_name = format!("test-bucket-{}-{}", "default".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket_with_acl(bucket_name.clone(), Some("public-read".to_owned())); // modify the bucket's CORS properties if cfg!(not(feature = "disable_minio_unsupported")) { // Minio support: CORS is not implemented by Minio - test_put_bucket_cors(&client, &test_bucket); + test_put_bucket_cors(&test_client.s3, &test_client.bucket_name); } + // file used for testing puts/gets + let filename = format!("test_file_{}", get_time().sec); + let filename2 = format!("test_file_2_{}", get_time().sec); + + // test failure responses on empty bucket + test_get_object_no_such_object(&test_client.s3, &test_client.bucket_name, &filename); + // PUT an object via buffer (no_credentials is an arbitrary choice) - test_put_object_with_filename( - &client, - &test_bucket, + test_put_object_with_filename_and_acl( + &test_client.s3, + &test_client.bucket_name, &filename, &"tests/sample-data/no_credentials", + Some("public-read".to_owned()), + ); + + // PUT a second copy of the object with tighter acls + test_put_object_with_filename( + &test_client.s3, + &test_client.bucket_name, + &filename2, + &"tests/sample-data/no_credentials", ); + // create an anonymous reader to test the acls + let ro_s3client = test_client.create_anonymous_client(); + // HEAD the object that was PUT - test_head_object(&client, &test_bucket, &filename); + test_head_object(&ro_s3client, &test_client.bucket_name, &filename); + + if cfg!(not(feature = "disable_minio_unsupported")) { + // HEAD the object that cannot be read, should return 403 + assert!(try_head_object(&ro_s3client, &test_client.bucket_name, &filename2).is_err()); + } + + // ... 
but it can be as the original owner + test_head_object(&test_client.s3, &test_client.bucket_name, &filename2); // GET the object - test_get_object(&client, &test_bucket, &filename); - test_get_object_range(&client, &test_bucket, &filename); + test_get_object(&ro_s3client, &test_client.bucket_name, &filename); + test_get_object_range(&ro_s3client, &test_client.bucket_name, &filename); + + // add two objects to test the listing by paging + for i in 1..3 { + test_client.put_test_object(format!("test_object_{}", i)); + } + + // list items with paging using list object API v1 + list_items_in_bucket_paged_v1(&ro_s3client, &test_client.bucket_name); + + // list items with paging using list object API v2 + if cfg!(not(feature = "disable_ceph_unsupported")) { + // Ceph support: this test depends on the list object v2 API which is not implemented by Ceph + list_items_in_bucket_paged_v2(&ro_s3client, &test_client.bucket_name); + } // copy the object to change its settings - test_copy_object(&client, &test_bucket, &filename); + test_copy_object(&test_client.s3, &test_client.bucket_name, &filename); + + // delete object, will also allow drop() to remove the bucket + test_delete_object(&test_client.s3, &test_client.bucket_name, &filename); + // remove test objects used for pagination tests + for i in 1..3 { + test_client.delete_object(format!("test_object_{}", i)); + } +} + +#[test] +// test against utf8 files +fn test_puts_gets_deletes_utf8() { + init_logging(); + + let bucket_name = format!("test-bucket-{}-{}", "utf-8".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); + + let utf8_filename = format!("test[über]file@{}", get_time().sec); // UTF8 filenames test_put_object_with_filename( - &client, - &test_bucket, + &test_client.s3, + &test_client.bucket_name, &utf8_filename, &"tests/sample-data/no_credentials", ); - test_copy_object_utf8(&client, &test_bucket, &utf8_filename); + test_copy_object_utf8(&test_client.s3, &test_client.bucket_name, &utf8_filename); + test_delete_object(&test_client.s3, &test_client.bucket_name, &utf8_filename); +} - test_delete_object(&client, &test_bucket, &utf8_filename); +#[test] +// test against binary files +fn test_puts_gets_deletes_binary() { + init_logging(); + + let bucket_name = format!("test-bucket-{}-{}", "binary".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); - // test failure responses - test_get_object_no_such_object(&client, &test_bucket, &binary_filename); + let binary_filename = format!("test_file_b{}", get_time().sec); // Binary objects: test_put_object_with_filename( - &client, - &test_bucket, + &test_client.s3, + &test_client.bucket_name, &binary_filename, &"tests/sample-data/binary-file", ); - test_get_object(&client, &test_bucket, &binary_filename); - test_get_object_blocking_read(&client, &test_bucket, &binary_filename); + test_get_object(&test_client.s3, &test_client.bucket_name, &binary_filename); + test_get_object_blocking_read(&test_client.s3, &test_client.bucket_name, &binary_filename); + test_delete_object(&test_client.s3, &test_client.bucket_name, &binary_filename); +} - // PUT an object via stream - let another_filename = format!("streaming{}", filename); - test_put_object_stream_with_filename( - &client, - &test_bucket, - &another_filename, - &"tests/sample-data/binary-file", - ); +#[test] +// test metadata ops +fn test_puts_gets_deletes_metadata() { + 
init_logging(); - // metadata tests + let bucket_name = format!("test-bucket-{}-{}", "metadata".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); + + let metadata_filename = format!("test_metadata_file_{}", get_time().sec); let mut metadata = HashMap::::new(); metadata.insert( "rusoto-metadata-some".to_string(), @@ -146,65 +346,277 @@ fn test_all_the_things() { metadata.insert("rusoto-metadata-none".to_string(), "".to_string()); test_put_object_with_metadata( - &client, - &test_bucket, + &test_client.s3, + &test_client.bucket_name, &metadata_filename, &"tests/sample-data/no_credentials", &metadata, ); - test_head_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata); - test_get_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata); - - // list items with paging using list object API v1 - list_items_in_bucket_paged_v1(&client, &test_bucket); - - // list items with paging using list object API v2 - if cfg!(not(feature = "disable_ceph_unsupported")) { - // Ceph support: this test depends on the list object v2 API which is not implemented by Ceph - list_items_in_bucket_paged_v2(&client, &test_bucket); - } + test_head_object_with_metadata( + &test_client.s3, + &test_client.bucket_name, + &metadata_filename, + &metadata, + ); + test_get_object_with_metadata( + &test_client.s3, + &test_client.bucket_name, + &metadata_filename, + &metadata, + ); + test_delete_object( + &test_client.s3, + &test_client.bucket_name, + &metadata_filename, + ); +} - test_delete_object(&client, &test_bucket, &metadata_filename); - test_delete_object(&client, &test_bucket, &binary_filename); - test_delete_object(&client, &test_bucket, &another_filename); +#[test] +// test object ops using presigned urls +fn test_puts_gets_deletes_presigned_url() { + init_logging(); - // DELETE the object - test_delete_object(&client, &test_bucket, &filename); + let bucket_name = format!("test-bucket-{}-{}", "presigned".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); - let filename = format!("{}_for_presigned", filename); + let filename = format!("test_file_{}_for_presigned", get_time().sec); // PUT an object for presigned url test_put_object_with_filename( - &client, - &test_bucket, + &test_client.s3, + &test_client.bucket_name, &filename, &"tests/sample-data/no_credentials", ); + + let credentials = DefaultCredentialsProvider::new() + .unwrap() + .credentials() + .wait() + .unwrap(); + // generate a presigned url - test_get_object_with_presigned_url(®ion, &credentials, &test_bucket, &filename); - test_get_object_with_expired_presigned_url(®ion, &credentials, &test_bucket, &filename); - test_put_object_with_presigned_url(®ion, &credentials, &test_bucket, &filename); - test_delete_object_with_presigned_url(®ion, &credentials, &test_bucket, &filename); + test_get_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &filename, + ); + test_get_object_with_expired_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &filename, + ); + test_put_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &filename, + ); + test_delete_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &filename, + ); - let utf8_filename = format!("{}_for_presigned", utf8_filename); + 
let utf8_filename = format!("test[über]file@{}_for_presigned", get_time().sec); // UTF8 filenames for presigned url test_put_object_with_filename( - &client, - &test_bucket, + &test_client.s3, + &test_client.bucket_name, &utf8_filename, &"tests/sample-data/no_credentials", ); // generate a presigned url - test_get_object_with_presigned_url(®ion, &credentials, &test_bucket, &utf8_filename); - test_get_object_with_expired_presigned_url(®ion, &credentials, &test_bucket, &utf8_filename); - test_put_object_with_presigned_url(®ion, &credentials, &test_bucket, &utf8_filename); - test_delete_object_with_presigned_url(®ion, &credentials, &test_bucket, &utf8_filename); + test_get_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &utf8_filename, + ); + test_get_object_with_expired_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &utf8_filename, + ); + test_put_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &utf8_filename, + ); + test_delete_object_with_presigned_url( + &test_client.region, + &credentials, + &test_client.bucket_name, + &utf8_filename, + ); +} + +#[test] +fn test_multipart_stream_uploads() { + init_logging(); + + let bucket_name = format!("test-bucket-{}-{}", "multipart".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); + + let multipart_filename = format!("test_multipart_file_{}", get_time().sec); + let credentials = DefaultCredentialsProvider::new() + .unwrap() + .credentials() + .wait() + .unwrap(); + + // test put via multipart upload + test_multipart_upload( + &test_client.s3, + &test_client.region, + &credentials, + &test_client.bucket_name, + &multipart_filename, + ); + + // PUT an object via stream + let streaming_filename = format!("streaming_test_file_{}", get_time().sec); + test_put_object_stream_with_filename( + &test_client.s3, + &test_client.bucket_name, + &streaming_filename, + &"tests/sample-data/binary-file", + ); + + test_delete_object( + &test_client.s3, + &test_client.bucket_name, + &multipart_filename, + ); + + test_delete_object( + &test_client.s3, + &test_client.bucket_name, + &streaming_filename, + ) +} + +#[test] +fn test_list_objects_encoding() { + init_logging(); + + let bucket_name = format!("test-bucket-{}-{}", "encoding".to_owned(), get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); + test_client.create_test_bucket(bucket_name.clone()); + + let filename = "a%2Fb/c/test_file".to_owned(); + let prefix = "a%2Fb/c".to_owned(); + test_client.put_test_object(filename.clone()); + + let list_obj_req_v1 = ListObjectsRequest { + bucket: bucket_name.clone(), + marker: Some(prefix.clone()), + prefix: Some(prefix.clone()), + ..Default::default() + }; + + let resp_v1 = test_client + .s3 + .list_objects(list_obj_req_v1) + .sync() + .expect("failed to list objects v1"); + + assert!(&resp_v1.contents.is_some()); + let contents_v1 = resp_v1.contents.clone().unwrap(); + assert_eq!(contents_v1.len(), 1); + + let object = &contents_v1[0]; + assert!(&object.key.is_some()); + + let key = object.key.clone().unwrap(); + assert_eq!(key, filename); + + // wrap up v1 list obj test with getting the obj with the key returned + let get_obj_req = GetObjectRequest { + bucket: bucket_name.clone(), + key: key.clone(), + ..Default::default() + }; + assert!(test_client.s3.get_object(get_obj_req).sync().is_ok()); + + let list_obj_req_v2 = 
ListObjectsV2Request { + bucket: bucket_name.clone(), + prefix: Some(prefix.clone()), + ..Default::default() + }; + let resp_v2 = &test_client + .s3 + .list_objects_v2(list_obj_req_v2) + .sync() + .expect("failed to list objects v2"); + + assert!(&resp_v2.contents.is_some()); + let contents_v2 = resp_v2.contents.clone().unwrap(); + assert_eq!(contents_v2.len(), 1); + + let object = &contents_v2[0]; + assert!(&object.key.is_some()); + + let key = object.key.clone().unwrap(); + assert_eq!(key, filename); + + // wrap up v2 list obj test with getting the obj with the key returned + let get_obj_req = GetObjectRequest { + bucket: bucket_name.clone(), + key: key.clone(), + ..Default::default() + }; + assert!(test_client.s3.get_object(get_obj_req).sync().is_ok()); + + test_delete_object(&test_client.s3, &bucket_name, &key); +} + +#[test] +fn test_name_space_truncate() { + init_logging(); + + let bucket_name = format!("test-name-space-{}", get_time().sec); + let test_client = TestS3Client::new(bucket_name.clone()); - // delete the test bucket - test_delete_bucket(&client, &test_bucket); + test_client.create_test_bucket(bucket_name.clone()); + + let filename_spaces = "spaces ".to_owned(); + test_client.put_test_object(filename_spaces.clone()); + + let req = ListObjectsV2Request { + bucket: bucket_name.clone(), + ..Default::default() + }; + + let key = &test_client + .s3 + .list_objects_v2(req) + .sync() + .unwrap() + .contents + .unwrap()[0] + .clone() + .key + .unwrap(); + + assert_eq!(*key, filename_spaces); + test_delete_object(&test_client.s3, &bucket_name, &filename_spaces); } -fn test_multipart_upload(client: &TestClient, region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { +fn test_multipart_upload( + client: &S3Client, + region: &Region, + credentials: &AwsCredentials, + bucket: &str, + filename: &str, +) { let create_multipart_req = CreateMultipartUploadRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -341,11 +753,14 @@ fn test_multipart_upload(client: &TestClient, region: &Region, credentials: &Aws part_number: Some(2), }, ]; + let completed_upload2 = CompletedMultipartUpload { parts: Some(completed_parts_2), }; + let complete_req2 = CompleteMultipartUploadRequest { bucket: bucket.to_owned(), + key: filename.to_owned(), upload_id: upload_id2.to_owned(), multipart_upload: Some(completed_upload2), @@ -357,25 +772,9 @@ fn test_multipart_upload(client: &TestClient, region: &Region, credentials: &Aws .sync() .expect("Couldn't complete multipart upload2"); println!("{:#?}", response2); - - // delete the completed file - test_delete_object(client, bucket, filename); } -fn test_create_bucket(client: &TestClient, bucket: &str) { - let create_bucket_req = CreateBucketRequest { - bucket: bucket.to_owned(), - ..Default::default() - }; - - let result = client - .create_bucket(create_bucket_req) - .sync() - .expect("Couldn't create bucket"); - println!("{:#?}", result); -} - -fn test_delete_bucket(client: &TestClient, bucket: &str) { +fn test_delete_bucket(client: &S3Client, bucket: &str) { let delete_bucket_req = DeleteBucketRequest { bucket: bucket.to_owned(), ..Default::default() @@ -396,7 +795,7 @@ fn test_delete_bucket(client: &TestClient, bucket: &str) { } fn test_put_object_with_filename( - client: &TestClient, + client: &S3Client, bucket: &str, dest_filename: &str, local_filename: &str, @@ -418,8 +817,33 @@ fn test_put_object_with_filename( } } +fn test_put_object_with_filename_and_acl( + client: &S3Client, + bucket: &str, + dest_filename: &str, + 
local_filename: &str, + acl: Option<String>, +) { + let mut f = File::open(local_filename).unwrap(); + let mut contents: Vec<u8> = Vec::new(); + match f.read_to_end(&mut contents) { + Err(why) => panic!("Error opening file to send to S3: {}", why), + Ok(_) => { + let req = PutObjectRequest { + bucket: bucket.to_owned(), + key: dest_filename.to_owned(), + body: Some(contents.into()), + acl, + ..Default::default() + }; + let result = client.put_object(req).sync().expect("Couldn't PUT object"); + println!("{:#?}", result); + } + } +} + fn test_put_object_stream_with_filename( - client: &TestClient, + client: &S3Client, bucket: &str, dest_filename: &str, local_filename: &str, @@ -438,21 +862,26 @@ fn test_put_object_stream_with_filename( println!("{:#?}", result); } -fn test_head_object(client: &TestClient, bucket: &str, filename: &str) { +fn try_head_object( + client: &S3Client, + bucket: &str, + filename: &str, +) -> Result<HeadObjectOutput, RusotoError<HeadObjectError>> { let head_req = HeadObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; - let result = client - .head_object(head_req) - .sync() - .expect("Couldn't HEAD object"); + client.head_object(head_req).sync() +} + +fn test_head_object(client: &S3Client, bucket: &str, filename: &str) { + let result = try_head_object(client, bucket, filename).expect("Couldn't HEAD object"); println!("{:#?}", result); } -fn test_get_object(client: &TestClient, bucket: &str, filename: &str) { +fn test_get_object(client: &S3Client, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -471,7 +900,7 @@ fn test_get_object(client: &TestClient, bucket: &str, filename: &str) { assert!(body.len() > 0); } -fn test_get_object_blocking_read(client: &TestClient, bucket: &str, filename: &str) { +fn test_get_object_blocking_read(client: &S3Client, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -491,7 +920,7 @@ fn test_get_object_blocking_read(client: &TestClient, bucket: &str, filename: &s assert!(body.len() > 0); } -fn test_get_object_no_such_object(client: &TestClient, bucket: &str, filename: &str) { +fn test_get_object_no_such_object(client: &S3Client, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -504,7 +933,7 @@ fn test_get_object_no_such_object(client: &TestClient, bucket: &str, filename: & }; } -fn test_get_object_range(client: &TestClient, bucket: &str, filename: &str) { +fn test_get_object_range(client: &S3Client, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -520,7 +949,7 @@ fn test_get_object_range(client: &TestClient, bucket: &str, filename: &str) { assert_eq!(result.content_length.unwrap(), 2); } -fn test_copy_object(client: &TestClient, bucket: &str, filename: &str) { +fn test_copy_object(client: &S3Client, bucket: &str, filename: &str) { let req = CopyObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -538,7 +967,7 @@ fn test_copy_object(client: &TestClient, bucket: &str, filename: &str) { println!("{:#?}", result); } -fn test_copy_object_utf8(client: &TestClient, bucket: &str, filename: &str) { +fn test_copy_object_utf8(client: &S3Client, bucket: &str, filename: &str) { let req = CopyObjectRequest { bucket: bucket.to_owned(), key: format!("{}", filename.to_owned()), @@ -556,7 +985,7 @@ fn test_copy_object_utf8(client: &TestClient, bucket: &str, filename: &str) 
{ println!("{:#?}", result); } -fn test_delete_object(client: &TestClient, bucket: &str, filename: &str) { +fn test_delete_object(client: &S3Client, bucket: &str, filename: &str) { let del_req = DeleteObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), @@ -570,25 +999,7 @@ fn test_delete_object(client: &TestClient, bucket: &str, filename: &str) { println!("{:#?}", result); } -fn test_list_buckets(client: &TestClient) { - let result = client.list_buckets().sync().expect("Couldn't list buckets"); - println!("\nbuckets available: {:#?}", result); -} - -fn list_items_in_bucket(client: &TestClient, bucket: &str) { - let list_obj_req = ListObjectsV2Request { - bucket: bucket.to_owned(), - start_after: Some("foo".to_owned()), - ..Default::default() - }; - let result = client - .list_objects_v2(list_obj_req) - .sync() - .expect("Couldn't list items in bucket (v2)"); - println!("Items in bucket: {:#?}", result); -} - -fn list_items_in_bucket_paged_v1(client: &TestClient, bucket: &str) { +fn list_items_in_bucket_paged_v1(client: &S3Client, bucket: &str) { let mut list_request = ListObjectsRequest { delimiter: Some("/".to_owned()), bucket: bucket.to_owned(), @@ -618,7 +1029,7 @@ fn list_items_in_bucket_paged_v1(client: &TestClient, bucket: &str) { } // Assuming there's already more than three item in our test bucket: -fn list_items_in_bucket_paged_v2(client: &TestClient, bucket: &str) { +fn list_items_in_bucket_paged_v2(client: &S3Client, bucket: &str) { let mut list_obj_req = ListObjectsV2Request { bucket: bucket.to_owned(), max_keys: Some(1), @@ -645,7 +1056,7 @@ fn list_items_in_bucket_paged_v2(client: &TestClient, bucket: &str) { ); } -fn test_put_bucket_cors(client: &TestClient, bucket: &str) { +fn test_put_bucket_cors(client: &S3Client, bucket: &str) { let cors_rules = vec![CORSRule { allowed_methods: vec!["PUT".to_owned(), "POST".to_owned(), "DELETE".to_owned()], allowed_origins: vec!["http://www.example.com".to_owned()], @@ -673,7 +1084,7 @@ fn test_put_bucket_cors(client: &TestClient, bucket: &str) { } fn test_put_object_with_metadata( - client: &TestClient, + client: &S3Client, bucket: &str, dest_filename: &str, local_filename: &str, @@ -698,7 +1109,7 @@ fn test_put_object_with_metadata( } fn test_head_object_with_metadata( - client: &TestClient, + client: &S3Client, bucket: &str, filename: &str, metadata: &HashMap<String, String>, @@ -720,7 +1131,7 @@ fn test_head_object_with_metadata( } fn test_get_object_with_metadata( - client: &TestClient, + client: &S3Client, bucket: &str, filename: &str, metadata: &HashMap<String, String>, @@ -756,8 +1167,7 @@ fn test_get_object_with_presigned_url( println!("get object presigned url: {:#?}", presigned_url); let mut res = reqwest::get(&presigned_url).expect("Couldn't get object via presigned url"); assert_eq!(res.status(), http::StatusCode::OK); - let size = res.content_length() - .unwrap_or(0); + let size = res.content_length().unwrap_or(0); assert!(size > 0); let mut buf: Vec<u8> = vec![]; res.copy_to(&mut buf).expect("Copying failed"); diff --git a/integration_tests/tests/securityhub.rs b/integration_tests/tests/securityhub.rs new file mode 100644 index 00000000000..6ba4c7d123e --- /dev/null +++ b/integration_tests/tests/securityhub.rs @@ -0,0 +1,20 @@ +#![cfg(feature = "securityhub")] + +extern crate env_logger; +extern crate rusoto_core; +extern crate rusoto_securityhub; + +use rusoto_core::Region; +use rusoto_securityhub::{ListInvitationsRequest, SecurityHub, SecurityHubClient}; + +#[test] +fn should_list_invitations() { + let _ = env_logger::try_init(); + let 
client = SecurityHubClient::new(Region::UsWest2); + let request = ListInvitationsRequest { + ..Default::default() + }; + + let result = client.list_invitations(request).sync(); + assert!(result.is_ok()); +} diff --git a/integration_tests/tests/transfer.rs b/integration_tests/tests/transfer.rs new file mode 100644 index 00000000000..ca7bb48ab0d --- /dev/null +++ b/integration_tests/tests/transfer.rs @@ -0,0 +1,15 @@ +#![cfg(feature = "transfer")] + +extern crate rusoto_core; +extern crate rusoto_transfer; + +use rusoto_core::Region; +use rusoto_transfer::{ListServersRequest, Transfer, TransferClient}; + +#[test] +fn should_list_servers() { + let client = TransferClient::new(Region::UsEast1); + let request = ListServersRequest::default(); + + println!("{:?}", client.list_servers(request).sync().unwrap()); +} diff --git a/integration_tests/tests/worklink.rs b/integration_tests/tests/worklink.rs index c346aae025f..2e6e46a01d0 100644 --- a/integration_tests/tests/worklink.rs +++ b/integration_tests/tests/worklink.rs @@ -6,7 +6,6 @@ extern crate rusoto_worklink; use rusoto_core::Region; use rusoto_worklink::{ListFleetsRequest, Worklink, WorklinkClient}; - #[test] fn should_list_fleets() { let client = WorklinkClient::new(Region::UsEast1); diff --git a/mock/Cargo.toml b/mock/Cargo.toml index cf038e84276..b1f452e0e33 100644 --- a/mock/Cargo.toml +++ b/mock/Cargo.toml @@ -11,7 +11,7 @@ keywords = ["AWS", "Amazon", "mock", "testing"] license = "MIT" name = "rusoto_mock" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" categories = [ "development-tools::testing" @@ -19,8 +19,8 @@ categories = [ edition = "2018" [badges] -appveyor = { repository = "rusoto/rusoto", branch = "master" } -azure-devops = { project = "Rusoto", pipeline = "rusoto.rusoto", build="1" } +appveyor = { repository = "matthewkmayer/rusoto", branch = "master" } +azure-devops = { project = "matthewkmayer/Rusoto", pipeline = "rusoto.rusoto", build="1" } [dependencies] chrono = "0.4.0" @@ -36,8 +36,10 @@ version = "0.0" version = "> 0.25.0" path = "../rusoto/core" default_features = false -features = ["native-tls"] [features] nightly-testing = ["rusoto_core/nightly-testing"] unstable = [] +rustls = ["rusoto_core/rustls"] +native-tls = ["rusoto_core/native-tls"] +default = ["native-tls"] diff --git a/mock/src/lib.rs b/mock/src/lib.rs index cfa32536575..a3c50ffa98a 100644 --- a/mock/src/lib.rs +++ b/mock/src/lib.rs @@ -43,8 +43,8 @@ use std::fs::File; use std::io::Read; use std::time::Duration; -use futures::future::{ok, err, FutureResult}; -use http::{HeaderMap, header::HeaderName, HttpTryFrom, StatusCode}; +use futures::future::{err, ok, FutureResult}; +use http::{header::HeaderName, HeaderMap, HttpTryFrom, StatusCode}; use rusoto_core::credential::{AwsCredentials, CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpResponse; use rusoto_core::signature::SignedRequest; @@ -79,14 +79,14 @@ pub struct MockRequestDispatcher { } enum RequestOutcome { - Performed(StatusCode), - Failed(HttpDispatchError), + Performed(StatusCode), + Failed(HttpDispatchError), } impl Default for RequestOutcome { - fn default() -> RequestOutcome { - RequestOutcome::Performed(StatusCode::default()) - } + fn default() -> RequestOutcome { + RequestOutcome::Performed(StatusCode::default()) + } } impl MockRequestDispatcher { @@ -101,10 +101,10 @@ impl MockRequestDispatcher { /// Mocks the service request failing with a communications error pub fn with_dispatch_error(error: 
HttpDispatchError) -> MockRequestDispatcher { - MockRequestDispatcher { - outcome: RequestOutcome::Failed(error), - ..MockRequestDispatcher::default() - } + MockRequestDispatcher { + outcome: RequestOutcome::Failed(error), + ..MockRequestDispatcher::default() + } } /// Mocks the service response body what would be @@ -136,7 +136,8 @@ impl MockRequestDispatcher { /// Mocks a single service header that would be returned from AWS pub fn with_header(mut self, key: &str, value: &str) -> MockRequestDispatcher { - self.headers.insert(key.parse::<HeaderName>().unwrap(), value.into()); + self.headers + .insert(key.parse::<HeaderName>().unwrap(), value.into()); self } } @@ -149,12 +150,12 @@ impl DispatchSignedRequest for MockRequestDispatcher { self.request_checker.as_ref().unwrap()(&request); } match self.outcome { - RequestOutcome::Performed(ref status) => ok(HttpResponse { - status: *status, - body: ByteStream::from(self.body.clone()), - headers: self.headers.clone() - }), - RequestOutcome::Failed(ref error) => err(error.clone()), + RequestOutcome::Performed(ref status) => ok(HttpResponse { + status: *status, + body: ByteStream::from(self.body.clone()), + headers: self.headers.clone(), + }), + RequestOutcome::Failed(ref error) => err(error.clone()), } } } diff --git a/rusoto/core/Cargo.toml b/rusoto/core/Cargo.toml index f7b1cb65e76..72a9f972d79 100644 --- a/rusoto/core/Cargo.toml +++ b/rusoto/core/Cargo.toml @@ -13,14 +13,14 @@ license = "MIT" name = "rusoto_core" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" exclude = ["test_resources/*"] edition = "2018" [badges] -appveyor = { repository = "rusoto/rusoto", branch = "master" } -azure-devops = { project = "Rusoto", pipeline = "rusoto.rusoto", build="1" } +appveyor = { repository = "matthewkmayer/rusoto", branch = "master" } +azure-devops = { project = "matthewkmayer/Rusoto", pipeline = "rusoto.rusoto", build="1" } [build-dependencies] rustc_version = "0.2.1" @@ -28,37 +28,36 @@ rustc_version = "0.2.1" [dependencies] bytes = "0.4.12" futures = "0.1.16" -hmac = "0.5.0" http = "0.1.17" hyper = "0.12" hyper-tls = { version = "0.3.0", optional = true } -hyper-rustls = { version = "0.16.0", optional = true } +hyper-rustls = { version = "0.17.1", optional = true } lazy_static = "1.0" log = "0.4.1" -md5 = "0.3.6" -base64 = "0.9" -hex = "0.3" +base64 = "0.11" serde = "1.0.2" serde_derive = "1.0.2" serde_json = "1.0.2" -sha2 = "0.7.0" time = "0.1.35" -url = "1.6.0" tokio = "0.1.7" tokio-timer = "0.2.6" -xml-rs = "0.7" +xml-rs = "0.8" [dependencies.rusoto_credential] path = "../credential" -version = "0.40" +version = "0.41" + +[dependencies.rusoto_signature] +path = "../signature" +version = "0.41" [dependencies.clippy] optional = true version = "0.0" [dev-dependencies] -env_logger = "0.5" -rand = "0.4.2" +env_logger = "0.7" +rand = "0.7" serde_json = "1.0.1" serde_test = "1.0.1" diff --git a/rusoto/core/README.md b/rusoto/core/README.md index 86be9c39f57..88f8fbd25f1 100644 --- a/rusoto/core/README.md +++ b/rusoto/core/README.md @@ -38,9 +38,9 @@ For example, to include only S3 and SQS: ``` toml [dependencies] -rusoto_core = "0.40.0" -rusoto_sqs = "0.40.0" -rusoto_s3 = "0.40.0" +rusoto_core = "0.41.0" +rusoto_sqs = "0.41.0" +rusoto_s3 = "0.41.0" ``` ## Migration notes @@ -93,9 +93,9 @@ If you do not want to use OpenSSL, you can replace it with rustls by editing you ``` toml [dependencies] -rusoto_core = { version="0.40.0", default_features=false, features=["rustls"] } 
-rusoto_sqs = { version="0.40.0", default_features=false, features=["rustls"] } -rusoto_s3 = { version="0.40.0", default_features=false, features=["rustls"] } +rusoto_core = { version="0.41.0", default_features=false, features=["rustls"] } +rusoto_sqs = { version="0.41.0", default_features=false, features=["rustls"] } +rusoto_s3 = { version="0.41.0", default_features=false, features=["rustls"] } ``` ### Credentials diff --git a/rusoto/core/build.rs b/rusoto/core/build.rs index 6d05a80adb6..cbdd8c6067c 100644 --- a/rusoto/core/build.rs +++ b/rusoto/core/build.rs @@ -13,7 +13,7 @@ fn generate_user_agent_vars(output_path: &Path) { let rust_version = rustc_version::version().expect("Could not retrieve rustc version"); let mut f = File::create(&output_path.join("user_agent_vars.rs")) .expect("Could not create user agent file"); - f.write_all(format!("static RUST_VERSION: &'static str = \"{}\";", rust_version).as_bytes()) + f.write_all(format!("static RUST_VERSION: &str = \"{}\";", rust_version).as_bytes()) .expect("Unable to write user agent"); } diff --git a/rusoto/core/src/client.rs b/rusoto/core/src/client.rs index 7c8466e4880..f2036a95135 100644 --- a/rusoto/core/src/client.rs +++ b/rusoto/core/src/client.rs @@ -3,7 +3,9 @@ use std::time::Duration; use futures::{Async, Future, Poll}; -use crate::credential::{CredentialsError, DefaultCredentialsProvider, ProvideAwsCredentials}; +use crate::credential::{ + Anonymous, CredentialsError, DefaultCredentialsProvider, ProvideAwsCredentials, StaticProvider, +}; use crate::error::RusotoError; use crate::future::{self, RusotoFuture}; use crate::request::{DispatchSignedRequest, HttpClient, HttpDispatchError, HttpResponse}; @@ -31,7 +33,7 @@ impl Client { DefaultCredentialsProvider::new().expect("failed to create credentials provider"); let dispatcher = HttpClient::new().expect("failed to create request dispatcher"); let inner = Arc::new(ClientInner { - credentials_provider: Arc::new(credentials_provider), + credentials_provider: Some(Arc::new(credentials_provider)), dispatcher: Arc::new(dispatcher), }); *lock = Arc::downgrade(&inner); @@ -47,7 +49,26 @@ impl Client { D::Future: Send, { let inner = ClientInner { - credentials_provider: Arc::new(credentials_provider), + credentials_provider: Some(Arc::new(credentials_provider)), + dispatcher: Arc::new(dispatcher), + }; + Client { + inner: Arc::new(inner), + } + } + + /// Create a client from a request dispatcher without a credentials provider. The client will + /// neither fetch any default credentials nor sign any requests. A non-signing client can be + /// useful for calling APIs like `Sts::assume_role_with_web_identity` and + /// `Sts::assume_role_with_saml` which do not require any request signing or when calling + /// AWS compatible third party API endpoints which employ different authentication mechanisms. + pub fn new_not_signing<D>(dispatcher: D) -> Self + where + D: DispatchSignedRequest + Send + Sync + 'static, + D::Future: Send, + { + let inner = ClientInner::<StaticProvider, D> { + credentials_provider: None, dispatcher: Arc::new(dispatcher), }; Client { @@ -59,7 +80,9 @@ impl Client { pub fn sign_and_dispatch<T, E>( &self, request: SignedRequest, - response_handler: fn(HttpResponse) -> Box<dyn Future<Item = T, Error = RusotoError<E>> + Send>, + response_handler: fn( + HttpResponse, + ) -> Box<dyn Future<Item = T, Error = RusotoError<E>> + Send>, ) -> RusotoFuture<T, E> { future::new(self.inner.sign_and_dispatch(request), response_handler) } @@ -83,7 +106,7 @@ pub trait TimeoutFuture: Future { } struct ClientInner<P, D> { - credentials_provider: Arc<P>, + credentials_provider: Option<Arc<P>>, dispatcher: Arc<D>, } @@ -159,9 +182,19 @@ where fn poll(&mut self) -> Poll<Self::Item, Self::Error> { match self.state.take().unwrap() { - SignAndDispatchState::Lazy { request } => { - let future = self.inner.credentials_provider.credentials(); - self.state = Some(SignAndDispatchState::FetchingCredentials { future, request }); + SignAndDispatchState::Lazy { mut request } => { + match self.inner.credentials_provider.as_ref() { + Some(p) => { + let future = p.credentials(); + self.state = + Some(SignAndDispatchState::FetchingCredentials { future, request }); + } + None => { + request.complement_with_plus(true); + let future = self.inner.dispatcher.dispatch(request, self.timeout); + self.state = Some(SignAndDispatchState::Dispatching { future }); + } + } self.poll() } SignAndDispatchState::FetchingCredentials { @@ -175,7 +208,11 @@ where Ok(Async::NotReady) } Ok(Async::Ready(credentials)) => { - request.sign_with_plus(&credentials, true); + if credentials.is_anonymous() { + request.complement_with_plus(true); + } else { + request.sign_with_plus(&credentials, true); + } let future = self.inner.dispatcher.dispatch(request, self.timeout); self.state = Some(SignAndDispatchState::Dispatching { future }); self.poll() diff --git a/rusoto/core/src/lib.rs b/rusoto/core/src/lib.rs index d6c67263f04..bf56752e34a 100644 --- a/rusoto/core/src/lib.rs +++ b/rusoto/core/src/lib.rs @@ -31,19 +31,14 @@ extern crate lazy_static; #[macro_use] extern crate log; extern crate base64; -extern crate hex; -extern crate hmac; -extern crate md5; pub extern crate rusoto_credential as credential; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; -extern crate sha2; extern crate time; extern crate tokio; extern crate tokio_timer; -extern crate url; extern crate xml; mod client; @@ -52,8 +47,10 @@ mod future; mod stream; pub mod param; +#[doc(hidden)] pub mod region; pub mod request; +#[doc(hidden)] pub mod signature; #[doc(hidden)] diff --git a/rusoto/core/src/proto/json/error.rs b/rusoto/core/src/proto/json/error.rs index 40b2430a7f2..15bde9985e6 100644 --- a/rusoto/core/src/proto/json/error.rs +++ b/rusoto/core/src/proto/json/error.rs @@ -6,7 +6,7 @@ use super::super::super::request::BufferedHttpResponse; struct RawError { #[serde(rename = "__type", default)] typ: Option<String>, - #[serde(default)] + #[serde(alias = "Message", default)] message: Option<String>, } @@ -89,3 +89,23 @@ fn deserialize_dynamodb_error() { "Requested resource not found: Table: tablename not found" ); } + +#[test] +fn deserialize_athena_error() { + use http::StatusCode; + + let payload = r#"{"__type":"InvalidRequestException","AthenaErrorCode":"MALFORMED_QUERY","ErrorCode":"MALFORMED_QUERY","Message":"line 6:18: mismatched input '.' 
expecting {, ',', 'ADD', 'AS', 'ALL', 'SOME', 'ANY', 'WHERE', 'GROUP', 'ORDER', 'HAVING', 'LIMIT', 'AT', 'NO', 'SUBSTRING', 'POSITION', 'TINYINT', 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'TIMESTAMP', 'INTERVAL', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'SECOND', 'ZONE', 'JOIN', 'CROSS', 'INNER', 'LEFT', 'RIGHT', 'FULL', 'NATURAL', 'FILTER', 'OVER', 'PARTITION', 'RANGE', 'ROWS', 'PRECEDING', 'FOLLOWING', 'CURRENT', 'ROW', 'SCHEMA', 'COMMENT', 'VIEW', 'REPLACE', 'GRANT', 'REVOKE', 'PRIVILEGES', 'PUBLIC', 'OPTION', 'EXPLAIN', 'ANALYZE', 'FORMAT', 'TYPE', 'TEXT', 'GRAPHVIZ', 'LOGICAL', 'DISTRIBUTED', 'VALIDATE', 'SHOW', 'TABLES', 'VIEWS', 'SCHEMAS', 'CATALOGS', 'COLUMNS', 'COLUMN', 'USE', 'PARTITIONS', 'FUNCTIONS', 'UNION', 'EXCEPT', 'INTERSECT', 'TO', 'SYSTEM', 'BERNOULLI', 'POISSONIZED', 'TABLESAMPLE', 'ARRAY', 'MAP', 'SET', 'RESET', 'SESSION', 'DATA', 'START', 'TRANSACTION', 'COMMIT', 'ROLLBACK', 'WORK', 'ISOLATION', 'LEVEL', 'SERIALIZABLE', 'REPEATABLE', 'COMMITTED', 'UNCOMMITTED', 'READ', 'WRITE', 'ONLY', 'CALL', 'INPUT', 'OUTPUT', 'CASCADE', 'RESTRICT', 'INCLUDING', 'EXCLUDING', 'PROPERTIES', 'FUNCTION', 'RETURNS', 'LANGUAGE', 'OPTIONS', 'SCALAR', 'AGGREGATE', 'WINDOW', 'NFD', 'NFC', 'NFKD', 'NFKC', 'IF', 'NULLIF', 'COALESCE', IDENTIFIER, DIGIT_IDENTIFIER, QUOTED_IDENTIFIER, BACKQUOTED_IDENTIFIER}"}"#; + let response = BufferedHttpResponse { + status: StatusCode::NOT_FOUND, + body: payload.into(), + headers: Default::default(), + }; + + let error = Error::parse(&response).unwrap(); + + assert_eq!(error.typ, "InvalidRequestException"); + assert_eq!( + error.msg, + r#"line 6:18: mismatched input '.' expecting {, ',', 'ADD', 'AS', 'ALL', 'SOME', 'ANY', 'WHERE', 'GROUP', 'ORDER', 'HAVING', 'LIMIT', 'AT', 'NO', 'SUBSTRING', 'POSITION', 'TINYINT', 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'TIMESTAMP', 'INTERVAL', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'SECOND', 'ZONE', 'JOIN', 'CROSS', 'INNER', 'LEFT', 'RIGHT', 'FULL', 'NATURAL', 'FILTER', 'OVER', 'PARTITION', 'RANGE', 'ROWS', 'PRECEDING', 'FOLLOWING', 'CURRENT', 'ROW', 'SCHEMA', 'COMMENT', 'VIEW', 'REPLACE', 'GRANT', 'REVOKE', 'PRIVILEGES', 'PUBLIC', 'OPTION', 'EXPLAIN', 'ANALYZE', 'FORMAT', 'TYPE', 'TEXT', 'GRAPHVIZ', 'LOGICAL', 'DISTRIBUTED', 'VALIDATE', 'SHOW', 'TABLES', 'VIEWS', 'SCHEMAS', 'CATALOGS', 'COLUMNS', 'COLUMN', 'USE', 'PARTITIONS', 'FUNCTIONS', 'UNION', 'EXCEPT', 'INTERSECT', 'TO', 'SYSTEM', 'BERNOULLI', 'POISSONIZED', 'TABLESAMPLE', 'ARRAY', 'MAP', 'SET', 'RESET', 'SESSION', 'DATA', 'START', 'TRANSACTION', 'COMMIT', 'ROLLBACK', 'WORK', 'ISOLATION', 'LEVEL', 'SERIALIZABLE', 'REPEATABLE', 'COMMITTED', 'UNCOMMITTED', 'READ', 'WRITE', 'ONLY', 'CALL', 'INPUT', 'OUTPUT', 'CASCADE', 'RESTRICT', 'INCLUDING', 'EXCLUDING', 'PROPERTIES', 'FUNCTION', 'RETURNS', 'LANGUAGE', 'OPTIONS', 'SCALAR', 'AGGREGATE', 'WINDOW', 'NFD', 'NFC', 'NFKD', 'NFKC', 'IF', 'NULLIF', 'COALESCE', IDENTIFIER, DIGIT_IDENTIFIER, QUOTED_IDENTIFIER, BACKQUOTED_IDENTIFIER}"# + ); +} diff --git a/rusoto/core/src/proto/xml/util.rs b/rusoto/core/src/proto/xml/util.rs index dbf4da4180c..3c9830208d7 100644 --- a/rusoto/core/src/proto/xml/util.rs +++ b/rusoto/core/src/proto/xml/util.rs @@ -352,5 +352,4 @@ mod tests { find_start_element(&mut reader); assert_eq!(peek_at_name(&mut reader).unwrap(), "ListQueuesResponse"); } - } diff --git a/rusoto/core/src/region.rs b/rusoto/core/src/region.rs index c4c5a9bb96a..e2c98f1a938 100644 --- a/rusoto/core/src/region.rs +++ b/rusoto/core/src/region.rs @@ -1,399 +1,2 @@ -//! AWS Regions and helper functions. -//! -//! 
Mostly used for translating the Region enum to a string AWS accepts. -//! -//! For example: `UsEast1` to "us-east-1" - -use crate::credential::ProfileProvider; -use serde::ser::SerializeTuple; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use std; -use std::error::Error; -use std::fmt::{self, Display, Error as FmtError, Formatter}; -use std::str::FromStr; - -/// An AWS region. -/// -/// # Default -/// -/// `Region` implements the `Default` trait. Calling `Region::default()` will attempt to read the -/// `AWS_DEFAULT_REGION` or `AWS_REGION` environment variable. If it is malformed, it will fall back to `Region::UsEast1`. -/// If it is not present it will fallback on the value associated with the current profile in `~/.aws/config` or the file -/// specified by the `AWS_CONFIG_FILE` environment variable. If that is malformed of absent it will fall back on `Region::UsEast1` -/// -/// # AWS-compatible services -/// -/// `Region::Custom` can be used to connect to AWS-compatible services such as DynamoDB Local or Ceph. -/// -/// ``` -/// # use rusoto_core::Region; -/// Region::Custom { -/// name: "eu-east-3".to_owned(), -/// endpoint: "http://localhost:8000".to_owned(), -/// }; -/// ``` -/// -/// # Caveats -/// -/// `CnNorth1` is currently untested due to Rusoto maintainers not having access to AWS China. -#[derive(Clone, Debug, PartialEq)] -pub enum Region { - /// Region that covers the Eastern part of Asia Pacific - ApEast1, - - /// Region that covers the North-Eastern part of Asia Pacific - ApNortheast1, - - /// Region that covers the North-Eastern part of Asia Pacific - ApNortheast2, - - /// Region that covers the North-Eastern part of Asia Pacific - ApNortheast3, - - /// Region that covers the Southern part of Asia Pacific - ApSouth1, - - /// Region that covers the South-Eastern part of Asia Pacific - ApSoutheast1, - - /// Region that covers the South-Eastern part of Asia Pacific - ApSoutheast2, - - /// Region that covers Canada - CaCentral1, - - /// Region that covers Central Europe - EuCentral1, - - /// Region that covers Western Europe - EuWest1, - - /// Region that covers Western Europe - EuWest2, - - /// Region that covers Western Europe - EuWest3, - - /// Region that covers Northern Europe - EuNorth1, - - /// Region that covers South America - SaEast1, - - /// Region that covers the Eastern part of the United States - UsEast1, - - /// Region that covers the Eastern part of the United States - UsEast2, - - /// Region that covers the Western part of the United States - UsWest1, - - /// Region that covers the Western part of the United States - UsWest2, - - /// Region that covers the Eastern part of the United States for the US Government - UsGovEast1, - - /// Region that covers the Western part of the United States for the US Government - UsGovWest1, - - /// Region that covers China - CnNorth1, - - /// Region that covers North-Western part of China - CnNorthwest1, - - /// Specifies a custom region, such as a local Ceph target - Custom { - /// Name of the endpoint (e.g. `"eu-east-2"`). - name: String, - - /// Endpoint to be used. For instance, `"https://s3.my-provider.net"` or just - /// `"s3.my-provider.net"` (default scheme is https). 
- endpoint: String, - }, -} - -impl Region { - /// Name of the region - /// - /// ``` - /// # use rusoto_core::Region; - /// assert_eq!(Region::CaCentral1.name(), "ca-central-1"); - /// assert_eq!( - /// Region::Custom { name: "eu-east-3".to_owned(), endpoint: "s3.net".to_owned() }.name(), - /// "eu-east-3" - /// ); - /// ``` - pub fn name(&self) -> &str { - match *self { - Region::ApEast1 => "ap-east-1", - Region::ApNortheast1 => "ap-northeast-1", - Region::ApNortheast2 => "ap-northeast-2", - Region::ApNortheast3 => "ap-northeast-3", - Region::ApSouth1 => "ap-south-1", - Region::ApSoutheast1 => "ap-southeast-1", - Region::ApSoutheast2 => "ap-southeast-2", - Region::CaCentral1 => "ca-central-1", - Region::EuCentral1 => "eu-central-1", - Region::EuWest1 => "eu-west-1", - Region::EuWest2 => "eu-west-2", - Region::EuWest3 => "eu-west-3", - Region::EuNorth1 => "eu-north-1", - Region::SaEast1 => "sa-east-1", - Region::UsEast1 => "us-east-1", - Region::UsEast2 => "us-east-2", - Region::UsWest1 => "us-west-1", - Region::UsWest2 => "us-west-2", - Region::UsGovEast1 => "us-gov-east-1", - Region::UsGovWest1 => "us-gov-west-1", - Region::CnNorth1 => "cn-north-1", - Region::CnNorthwest1 => "cn-northwest-1", - Region::Custom { ref name, .. } => name, - } - } -} - -/// An error produced when attempting to convert a `str` into a `Region` fails. -#[derive(Debug, PartialEq)] -pub struct ParseRegionError { - message: String, -} - -// Manually created for lack of a way to flatten the `Region::Custom` variant -// Related: https://github.com/serde-rs/serde/issues/119 -impl Serialize for Region { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut seq = serializer.serialize_tuple(2)?; - if let Region::Custom { - ref endpoint, - ref name, - } = *self - { - seq.serialize_element(&name)?; - seq.serialize_element(&Some(&endpoint))?; - } else { - seq.serialize_element(self.name())?; - seq.serialize_element(&None as &Option<&str>)?; - } - seq.end() - } -} - -struct RegionVisitor; - -impl<'de> de::Visitor<'de> for RegionVisitor { - type Value = Region; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("sequence of (name, Some(endpoint))") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: de::SeqAccess<'de>, - { - let name: String = seq - .next_element()? 
- .ok_or_else(|| de::Error::custom("region is missing name"))?; - let endpoint: Option = match seq.next_element() { - Ok(o) => o, - Err(_) => None, - }; - match (name, endpoint) { - (name, Some(endpoint)) => Ok(Region::Custom { name, endpoint }), - (name, None) => name.parse().map_err(de::Error::custom), - } - } -} - -// Manually created for lack of a way to flatten the `Region::Custom` variant -// Related: https://github.com/serde-rs/serde/issues/119 -impl<'de> Deserialize<'de> for Region { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_tuple(2, RegionVisitor) - } -} - -impl FromStr for Region { - type Err = ParseRegionError; - - fn from_str(s: &str) -> Result { - let v: &str = &s.to_lowercase(); - match v { - "ap-east-1" | "apeast1" => Ok(Region::ApEast1), - "ap-northeast-1" | "apnortheast1" => Ok(Region::ApNortheast1), - "ap-northeast-2" | "apnortheast2" => Ok(Region::ApNortheast2), - "ap-northeast-3" | "apnortheast3" => Ok(Region::ApNortheast3), - "ap-south-1" | "apsouth1" => Ok(Region::ApSouth1), - "ap-southeast-1" | "apsoutheast1" => Ok(Region::ApSoutheast1), - "ap-southeast-2" | "apsoutheast2" => Ok(Region::ApSoutheast2), - "ca-central-1" | "cacentral1" => Ok(Region::CaCentral1), - "eu-central-1" | "eucentral1" => Ok(Region::EuCentral1), - "eu-west-1" | "euwest1" => Ok(Region::EuWest1), - "eu-west-2" | "euwest2" => Ok(Region::EuWest2), - "eu-west-3" | "euwest3" => Ok(Region::EuWest3), - "eu-north-1" | "eunorth1" => Ok(Region::EuNorth1), - "sa-east-1" | "saeast1" => Ok(Region::SaEast1), - "us-east-1" | "useast1" => Ok(Region::UsEast1), - "us-east-2" | "useast2" => Ok(Region::UsEast2), - "us-west-1" | "uswest1" => Ok(Region::UsWest1), - "us-west-2" | "uswest2" => Ok(Region::UsWest2), - "us-gov-east-1" | "usgoveast1" => Ok(Region::UsGovEast1), - "us-gov-west-1" | "usgovwest1" => Ok(Region::UsGovWest1), - "cn-north-1" | "cnnorth1" => Ok(Region::CnNorth1), - "cn-northwest-1" | "cnnorthwest1" => Ok(Region::CnNorthwest1), - s => Err(ParseRegionError::new(s)), - } - } -} - -impl ParseRegionError { - /// Parses a region given as a string literal into a type `Region' - pub fn new(input: &str) -> Self { - ParseRegionError { - message: format!("Not a valid AWS region: {}", input), - } - } -} - -impl Error for ParseRegionError { - fn description(&self) -> &str { - &self.message - } -} - -impl Display for ParseRegionError { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - write!(f, "{}", self.message) - } -} - -impl Default for Region { - fn default() -> Region { - match std::env::var("AWS_DEFAULT_REGION").or_else(|_| std::env::var("AWS_REGION")) { - Ok(ref v) => Region::from_str(v).unwrap_or(Region::UsEast1), - Err(_) => match ProfileProvider::region() { - Ok(Some(region)) => Region::from_str(®ion).unwrap_or(Region::UsEast1), - _ => Region::UsEast1, - }, - } - } -} - -#[cfg(test)] -mod tests { - extern crate serde_test; - use self::serde_test::{assert_tokens, Token}; - use super::*; - - #[test] - fn from_str() { - assert_eq!( - "foo" - .parse::() - .err() - .expect("Parsing foo as a Region was not an error") - .to_string(), - "Not a valid AWS region: foo".to_owned() - ); - assert_eq!("ap-east-1".parse(), Ok(Region::ApEast1)); - assert_eq!("ap-northeast-1".parse(), Ok(Region::ApNortheast1)); - assert_eq!("ap-northeast-2".parse(), Ok(Region::ApNortheast2)); - assert_eq!("ap-northeast-3".parse(), Ok(Region::ApNortheast3)); - assert_eq!("ap-south-1".parse(), Ok(Region::ApSouth1)); - assert_eq!("ap-southeast-1".parse(), 
Ok(Region::ApSoutheast1)); - assert_eq!("ap-southeast-2".parse(), Ok(Region::ApSoutheast2)); - assert_eq!("ca-central-1".parse(), Ok(Region::CaCentral1)); - assert_eq!("eu-central-1".parse(), Ok(Region::EuCentral1)); - assert_eq!("eu-west-1".parse(), Ok(Region::EuWest1)); - assert_eq!("eu-west-2".parse(), Ok(Region::EuWest2)); - assert_eq!("eu-west-3".parse(), Ok(Region::EuWest3)); - assert_eq!("eu-north-1".parse(), Ok(Region::EuNorth1)); - assert_eq!("sa-east-1".parse(), Ok(Region::SaEast1)); - assert_eq!("us-east-1".parse(), Ok(Region::UsEast1)); - assert_eq!("us-east-2".parse(), Ok(Region::UsEast2)); - assert_eq!("us-west-1".parse(), Ok(Region::UsWest1)); - assert_eq!("us-west-2".parse(), Ok(Region::UsWest2)); - assert_eq!("us-gov-east-1".parse(), Ok(Region::UsGovEast1)); - assert_eq!("us-gov-west-1".parse(), Ok(Region::UsGovWest1)); - assert_eq!("cn-north-1".parse(), Ok(Region::CnNorth1)); - assert_eq!("cn-northwest-1".parse(), Ok(Region::CnNorthwest1)); - } - - #[test] - fn region_serialize_deserialize() { - assert_tokens(&Region::ApEast1, &tokens_for_region("ap-east-1")); - assert_tokens(&Region::ApNortheast1, &tokens_for_region("ap-northeast-1")); - assert_tokens(&Region::ApNortheast2, &tokens_for_region("ap-northeast-2")); - assert_tokens(&Region::ApNortheast3, &tokens_for_region("ap-northeast-3")); - assert_tokens(&Region::ApSouth1, &tokens_for_region("ap-south-1")); - assert_tokens(&Region::ApSoutheast1, &tokens_for_region("ap-southeast-1")); - assert_tokens(&Region::ApSoutheast2, &tokens_for_region("ap-southeast-2")); - assert_tokens(&Region::CaCentral1, &tokens_for_region("ca-central-1")); - assert_tokens(&Region::EuCentral1, &tokens_for_region("eu-central-1")); - assert_tokens(&Region::EuWest1, &tokens_for_region("eu-west-1")); - assert_tokens(&Region::EuWest2, &tokens_for_region("eu-west-2")); - assert_tokens(&Region::EuWest3, &tokens_for_region("eu-west-3")); - assert_tokens(&Region::EuNorth1, &tokens_for_region("eu-north-1")); - assert_tokens(&Region::SaEast1, &tokens_for_region("sa-east-1")); - assert_tokens(&Region::UsEast1, &tokens_for_region("us-east-1")); - assert_tokens(&Region::UsEast2, &tokens_for_region("us-east-2")); - assert_tokens(&Region::UsWest1, &tokens_for_region("us-west-1")); - assert_tokens(&Region::UsWest2, &tokens_for_region("us-west-2")); - assert_tokens(&Region::UsGovEast1, &tokens_for_region("us-gov-east-1")); - assert_tokens(&Region::UsGovWest1, &tokens_for_region("us-gov-west-1")); - assert_tokens(&Region::CnNorth1, &tokens_for_region("cn-north-1")); - assert_tokens(&Region::CnNorthwest1, &tokens_for_region("cn-northwest-1")) - } - - fn tokens_for_region(name: &'static str) -> [Token; 4] { - [ - Token::Tuple { len: 2 }, - Token::String(name), - Token::None, - Token::TupleEnd, - ] - } - - #[test] - fn region_serialize_deserialize_custom() { - let custom_region = Region::Custom { - endpoint: "http://localhost:8000".to_owned(), - name: "eu-east-1".to_owned(), - }; - let expected = "[\"eu-east-1\",\"http://localhost:8000\"]"; - let region_deserialized = serde_json::to_string(&custom_region).unwrap(); - assert_eq!(region_deserialized, expected); - - let from_json = serde_json::de::from_str(®ion_deserialized).unwrap(); - assert_eq!(custom_region, from_json); - } - - #[test] - fn region_serialize_deserialize_standard() { - let r = Region::UsWest2; - let region_deserialized = serde_json::to_string(&r).unwrap(); - let expected = "[\"us-west-2\",null]"; - - assert_eq!(region_deserialized, expected); - - let from_json = 
serde_json::de::from_str(®ion_deserialized).unwrap(); - assert_eq!(r, from_json); - } - - #[test] - fn region_serialize_deserialize_standard_only_region_name() { - let r = Region::UsWest2; - let only_region_name = "[\"us-west-2\"]"; - let from_json = serde_json::de::from_str(&only_region_name).unwrap(); - assert_eq!(r, from_json); - } -} +// moved to rusoto_signature +pub use rusoto_signature::region::*; diff --git a/rusoto/core/src/request.rs b/rusoto/core/src/request.rs index 1e539726c53..e0668c881e8 100644 --- a/rusoto/core/src/request.rs +++ b/rusoto/core/src/request.rs @@ -17,19 +17,18 @@ use std::time::Duration; use crate::tls::HttpsConnector; use bytes::Bytes; use futures::{Async, Future, Poll, Stream}; -use http::header::{HeaderName, HeaderValue}; -use http::{HeaderMap, Method, StatusCode}; -use hyper::body::Payload; +use http::{HeaderMap, Request, StatusCode}; +use hyper::body::Body; use hyper::client::connect::Connect; +use hyper::client::Builder as HyperBuilder; use hyper::client::HttpConnector; use hyper::client::ResponseFuture as HyperResponseFuture; use hyper::Error as HyperError; -use hyper::{Body, Client as HyperClient, Request as HyperRequest, Response as HyperResponse}; +use hyper::{Client as HyperClient, Response as HyperResponse}; +use std::convert::TryInto; use tokio_timer::Timeout; -use log::Level::Debug; - -use crate::signature::{SignedRequest, SignedRequestPayload}; +use crate::signature::SignedRequest; use crate::stream::ByteStream; // Pulls in the statically generated rustc version. @@ -262,52 +261,9 @@ impl Future for HttpClientFuture { } } -struct HttpClientPayload { - inner: Option, -} - -impl Payload for HttpClientPayload { - type Data = io::Cursor; - type Error = io::Error; - - fn poll_data(&mut self) -> Poll, Self::Error> { - match self.inner { - None => Ok(Async::Ready(None)), - Some(SignedRequestPayload::Buffer(ref mut buffer)) => { - if buffer.is_empty() { - Ok(Async::Ready(None)) - } else { - Ok(Async::Ready(Some(io::Cursor::new(buffer.split_off(0))))) - } - } - Some(SignedRequestPayload::Stream(ref mut stream)) => match stream.poll()? { - Async::NotReady => Ok(Async::NotReady), - Async::Ready(None) => Ok(Async::Ready(None)), - Async::Ready(Some(buffer)) => Ok(Async::Ready(Some(io::Cursor::new(buffer)))), - }, - } - } - - fn is_end_stream(&self) -> bool { - match self.inner { - None => true, - Some(SignedRequestPayload::Buffer(ref buffer)) => buffer.is_empty(), - Some(SignedRequestPayload::Stream(_)) => false, - } - } - - fn content_length(&self) -> Option { - match self.inner { - None => Some(0), - Some(SignedRequestPayload::Buffer(ref buffer)) => Some(buffer.len() as u64), - Some(SignedRequestPayload::Stream(ref stream)) => stream.size_hint().map(|s| s as u64), - } - } -} - /// Http client for use with AWS services. 
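// Aside (not part of the patch): a hedged sketch of wiring together the two
// constructors this changeset introduces -- `HttpClient::from_builder` below and
// `Client::new_not_signing` in rusoto/core/src/client.rs above. Assumes hyper 0.12
// APIs; a TLS-capable connector would replace the plain `HttpConnector` in real use:
//
//     use hyper::client::HttpConnector;
//     use rusoto_core::request::HttpClient;
//     use rusoto_core::Client;
//
//     let mut builder = hyper::Client::builder();
//     builder.keep_alive(true);
//     let dispatcher = HttpClient::from_builder(builder, HttpConnector::new(4));
//     // No credentials provider: requests are dispatched unsigned, which suits
//     // calls like `Sts::assume_role_with_web_identity`.
//     let client = Client::new_not_signing(dispatcher);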
pub struct HttpClient<C = HttpsConnector<HttpConnector>> { - inner: HyperClient<C, HttpClientPayload>, + inner: HyperClient<C, Body>, } impl HttpClient { @@ -370,6 +326,12 @@ where HttpClient { inner } } + + /// Allows for a custom builder and connector to be used with the HttpClient + pub fn from_builder(builder: HyperBuilder, connector: C) -> Self { + let inner = builder.build(connector); + HttpClient { inner } + } } /// Configuration options for the HTTP Client @@ -407,105 +369,29 @@ where type Future = HttpClientFuture; fn dispatch(&self, request: SignedRequest, timeout: Option<Duration>) -> Self::Future { - let hyper_method = match request.method().as_ref() { - "POST" => Method::POST, - "PUT" => Method::PUT, - "DELETE" => Method::DELETE, - "GET" => Method::GET, - "HEAD" => Method::HEAD, - v => { - return HttpClientFuture(ClientFutureInner::Error(format!( - "Unsupported HTTP verb {}", - v - ))) - } - }; - - // translate the headers map to a format Hyper likes - let mut hyper_headers = HeaderMap::new(); - for h in request.headers().iter() { - let header_name = match h.0.parse::<HeaderName>() { - Ok(name) => name, - Err(err) => { - return HttpClientFuture(ClientFutureInner::Error(format!( - "error parsing header name: {}", - err - ))); - } - }; - for v in h.1.iter() { - let header_value = match HeaderValue::from_bytes(v) { - Ok(value) => value, - Err(err) => { - return HttpClientFuture(ClientFutureInner::Error(format!( - "error parsing header value: {}", - err - ))); - } - }; - hyper_headers.append(&header_name, header_value); - } - } - - // Add a default user-agent header if one is not already present. - if !hyper_headers.contains_key("user-agent") { - hyper_headers.insert("user-agent", DEFAULT_USER_AGENT.parse().unwrap()); - } - - let mut final_uri = format!( - "{}://{}{}", - request.scheme(), - request.hostname(), - request.canonical_path() - ); - if !request.canonical_query_string().is_empty() { - final_uri = final_uri + &format!("?{}", request.canonical_query_string()); - } - - if log_enabled!(Debug) { - let payload = match request.payload { - Some(SignedRequestPayload::Buffer(ref payload_bytes)) => { - String::from_utf8(payload_bytes.as_ref().to_owned()) - .unwrap_or_else(|_| String::from("<non-UTF-8 data>")) - } - Some(SignedRequestPayload::Stream(ref stream)) => { - format!("<stream size_hint={:?}>", stream.size_hint()) - } - None => "".to_owned(), - }; - - debug!( - "Full request: \n method: {}\n final_uri: {}\n payload: {}\nHeaders:\n", - hyper_method, final_uri, payload - ); - for (h, v) in hyper_headers.iter() { - debug!("{}:{:?}", h.as_str(), v); - } - } - - let mut http_request_builder = HyperRequest::builder(); - http_request_builder.method(hyper_method); - http_request_builder.uri(final_uri); - - let body = HttpClientPayload { - inner: request.payload, - }; - let mut http_request = match http_request_builder.body(body) { - Ok(request) => request, + let mut req: Request<Body> = match request.try_into() { + Ok(req) => req, Err(err) => { return HttpClientFuture(ClientFutureInner::Error(format!( "error building request: {}", err - ))); + ))) } }; - *http_request.headers_mut() = hyper_headers; + if !req.headers().contains_key("user-agent") { + req.headers_mut().insert( + "user-agent", + DEFAULT_USER_AGENT + .parse() + .expect("failed to parse user agent string"), + ); + } let inner = match timeout { - None => ClientFutureInner::Hyper(self.inner.request(http_request)), + None => ClientFutureInner::Hyper(self.inner.request(req)), Some(duration) => { - let future = Timeout::new(self.inner.request(http_request), duration); 
ClientFutureInner::HyperWithTimeout(future) } }; diff --git a/rusoto/core/src/signature.rs b/rusoto/core/src/signature.rs index e757f714465..e1510cffb3f 100644 --- a/rusoto/core/src/signature.rs +++ b/rusoto/core/src/signature.rs @@ -1,1040 +1,2 @@ -//! AWS API request signatures. -//! -//! Follows [AWS Signature 4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) -//! algorithm. -//! -//! If needed, the request will be re-issued to a temporary redirect endpoint. This can happen with -//! newly created S3 buckets not in us-standard/us-east-1. - -use std::borrow::Cow; -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; -use std::fmt; -use std::str; -use std::time::Duration; - -use base64; -use bytes::Bytes; -use hex; -use hmac::{Hmac, Mac}; -use md5; -use sha2::{Digest, Sha256}; -use time::now_utc; -use time::Tm; -use url::percent_encoding::{percent_decode, utf8_percent_encode, EncodeSet}; - -use crate::credential::AwsCredentials; -use crate::param::{Params, ServiceParams}; -use crate::region::Region; -use crate::stream::ByteStream; - -/// Payload string to use for unsigned payload -pub static UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD"; -/// Payload string to use for signed empty payload -pub static EMPTY_SHA256_HASH: &str = - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; - -/// Possible payloads included in a `SignedRequest`. -pub enum SignedRequestPayload { - /// Transfer payload in a single chunk - Buffer(Bytes), - /// Transfer payload in multiple chunks - Stream(ByteStream), -} - -impl fmt::Debug for SignedRequestPayload { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SignedRequestPayload::Buffer(ref buf) => { - write!(f, "SignedRequestPayload::Buffer(len = {})", buf.len()) - } - SignedRequestPayload::Stream(ref stream) => write!( - f, - "SignedRequestPayload::Stream(size_hint = {:?})", - stream.size_hint() - ), - } - } -} - -/// A data structure for all the elements of an HTTP request that are involved in -/// the Amazon Signature Version 4 signing process -#[derive(Debug)] -pub struct SignedRequest { - /// The HTTP Method - pub method: String, - /// The AWS Service - pub service: String, - /// The AWS Region - pub region: Region, - /// The HTTP request path - pub path: String, - /// The HTTP Request Headers - pub headers: BTreeMap>>, - /// The HTTP request paramaters - pub params: Params, - /// The HTTP/HTTPS protocol - pub scheme: Option, - /// The AWS hostname - pub hostname: Option, - /// The HTTP Content - pub payload: Option, - /// The Standardised query string - pub canonical_query_string: String, - /// The Standardised URI - pub canonical_uri: String, -} - -impl SignedRequest { - /// Default constructor - pub fn new(method: &str, service: &str, region: &Region, path: &str) -> SignedRequest { - SignedRequest { - method: method.to_string(), - service: service.to_string(), - region: region.clone(), - path: path.to_string(), - headers: BTreeMap::new(), - params: Params::new(), - scheme: None, - hostname: None, - payload: None, - canonical_query_string: String::new(), - canonical_uri: String::new(), - } - } - - /// Sets the value of the "content-type" header. 
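// Aside (not part of the original file): this SignedRequest API is being removed
// from rusoto_core; judging by the region.rs change above and the new
// rusoto_signature dependency, it presumably lives on in that crate with the same
// surface, roughly:
//
//     let mut req = SignedRequest::new("GET", "s3", &Region::UsEast1, "/");
//     req.set_content_type("application/octet-stream".to_owned());
//     req.sign(&credentials);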
- pub fn set_content_type(&mut self, content_type: String) { - self.add_header("content-type", &content_type); - } - - /// Sets the target hostname - pub fn set_hostname(&mut self, hostname: Option) { - self.hostname = hostname; - } - - /// Sets the target hostname using the current service type and region - /// - /// See the implementation of build_hostname to see how this is done - pub fn set_endpoint_prefix(&mut self, endpoint_prefix: String) { - self.hostname = Some(build_hostname(&endpoint_prefix, &self.region)); - } - - /// Sets the new body (payload) - pub fn set_payload>(&mut self, payload: Option) { - self.payload = payload.map(|chunk| SignedRequestPayload::Buffer(chunk.into())); - } - - /// Sets the new body (payload) as a stream - pub fn set_payload_stream(&mut self, stream: ByteStream) { - self.payload = Some(SignedRequestPayload::Stream(stream)); - } - - /// Computes and sets the Content-MD5 header based on the current payload. - /// - /// Has no effect if the payload is not set, or is not a buffer. - pub fn set_content_md5_header(&mut self) { - let digest; - if let Some(SignedRequestPayload::Buffer(ref payload)) = self.payload { - digest = Some(md5::compute(payload)); - } else { - digest = None; - } - if let Some(digest) = digest { - // need to deref digest and then pass that reference: - self.add_header("Content-MD5", &base64::encode(&(*digest))); - } - } - - /// Returns the current HTTP method - pub fn method(&self) -> &str { - &self.method - } - - /// Returns the current path - pub fn path(&self) -> &str { - &self.path - } - - /// Invokes `canonical_uri(path)` to return a canonical path - pub fn canonical_path(&self) -> String { - canonical_uri(&self.path, &self.region) - } - - /// Returns the current canonical URI - pub fn canonical_uri(&self) -> &str { - &self.canonical_uri - } - - /// Returns the current query string - /// - /// Converts a paramater such as "example param": "examplekey" into "&example+param=examplekey" - pub fn canonical_query_string(&self) -> &str { - &self.canonical_query_string - } - - /// Returns the current headers - pub fn headers(&self) -> &BTreeMap>> { - &self.headers - } - - /// Returns the current http scheme (https or http) - pub fn scheme(&self) -> String { - match self.scheme { - Some(ref p) => p.to_string(), - None => match self.region { - Region::Custom { ref endpoint, .. } => { - if endpoint.starts_with("http://") { - "http".to_owned() - } else { - "https".to_owned() - } - } - _ => "https".to_owned(), - }, - } - } - - /// Converts hostname to String if it exists, else it invokes build_hostname() - pub fn hostname(&self) -> String { - // hostname may be already set by an endpoint prefix - match self.hostname { - Some(ref h) => h.to_string(), - None => build_hostname(&self.service, &self.region), - } - } - - /// If the key exists in headers, set it to blank/unoccupied: - pub fn remove_header(&mut self, key: &str) { - let key_lower = key.to_ascii_lowercase().to_string(); - self.headers.remove(&key_lower); - } - - /// Add a value to the array of headers for the specified key. 
- /// Headers are kept sorted by key name for use at signing (BTreeMap) - pub fn add_header(&mut self, key: K, value: &str) { - let key_lower = key.to_string().to_ascii_lowercase(); - let value_vec = value.as_bytes().to_vec(); - - match self.headers.entry(key_lower) { - Entry::Vacant(entry) => { - let mut values = Vec::new(); - values.push(value_vec); - entry.insert(values); - } - Entry::Occupied(entry) => { - entry.into_mut().push(value_vec); - } - } - } - - /// Adds parameter to the HTTP Request - pub fn add_param(&mut self, key: S, value: S) - where - S: Into, - { - self.params.insert(key.into(), Some(value.into())); - } - - /// Sets paramaters with a given variable of `Params` type - pub fn set_params(&mut self, params: Params) { - self.params = params; - } - - /// Generate a Presigned URL for AWS - /// - /// See the [documentation](https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html) - /// for more information. - pub fn generate_presigned_url( - &mut self, - creds: &AwsCredentials, - expires_in: &Duration, - should_sha256_sign_payload: bool, - ) -> String { - debug!("Presigning request URL"); - - self.sign(creds); - let hostname = match self.hostname { - Some(ref h) => h.to_string(), - None => build_hostname(&self.service, &self.region), - }; - - let current_time = now_utc(); - let current_time_fmted = current_time.strftime("%Y%m%dT%H%M%SZ").unwrap(); - let current_time_fmted = format!("{}", ¤t_time_fmted); - let current_date = current_time.strftime("%Y%m%d").unwrap(); - - self.remove_header("X-Amz-Content-Sha256"); - - self.remove_header("X-Amz-Date"); - - self.remove_header("Content-Type"); - - if let Some(ref token) = *creds.token() { - self.remove_header("X-Amz-Security-Token"); - self.params - .put("X-Amz-Security-Token", encode_uri_strict(token)); - } - - self.remove_header("X-Amz-Algorithm"); - self.params.put("X-Amz-Algorithm", "AWS4-HMAC-SHA256"); - - self.remove_header("X-Amz-Credential"); - self.params.put( - "X-Amz-Credential", - format!( - "{}/{}/{}/{}/aws4_request", - &creds.aws_access_key_id(), - ¤t_date, - self.region.name(), - self.service - ), - ); - - self.remove_header("X-Amz-Expires"); - let expiration_time = format!("{}", expires_in.as_secs()); - self.params.put("X-Amz-Expires", expiration_time); - - self.canonical_uri = canonical_uri(&self.path, &self.region); - let canonical_headers = canonical_headers(&self.headers); - - let signed_headers = signed_headers(&self.headers); - self.params.put("X-Amz-SignedHeaders", &signed_headers); - - self.params.put("X-Amz-Date", current_time_fmted); - - self.canonical_query_string = build_canonical_query_string(&self.params); - - debug!("canonical_uri: {:?}", self.canonical_uri); - debug!("canonical_headers: {:?}", canonical_headers); - debug!("signed_headers: {:?}", signed_headers); - debug!("canonical_query_string: {:?}", self.canonical_query_string); - - let payload = if should_sha256_sign_payload { - match self.payload { - None => { - Cow::Borrowed(EMPTY_SHA256_HASH) - } - Some(SignedRequestPayload::Buffer(ref payload)) => { - let (digest, _len) = digest_payload(&payload); - Cow::Owned(digest) - } - Some(SignedRequestPayload::Stream(ref _stream)) => { - Cow::Borrowed(UNSIGNED_PAYLOAD) - } - } - } else { - Cow::Borrowed(UNSIGNED_PAYLOAD) - }; - - let canonical_request = format!( - "{}\n{}\n{}\n{}\n{}\n{}", - &self.method, - self.canonical_uri, - self.canonical_query_string, - canonical_headers, - &signed_headers, - payload - ); - - debug!("canonical_request: {:?}", canonical_request); - - // use the hashed 
-        // use the hashed canonical request to build the string to sign
-        let hashed_canonical_request = to_hexdigest(&canonical_request);
-
-        debug!("hashed_canonical_request: {:?}", hashed_canonical_request);
-
-        let scope = format!(
-            "{}/{}/{}/aws4_request",
-            current_date,
-            self.region.name(),
-            &self.service
-        );
-
-        debug!("scope: {}", scope);
-
-        let string_to_sign = string_to_sign(current_time, &hashed_canonical_request, &scope);
-
-        debug!("string_to_sign: {}", string_to_sign);
-
-        let signature = sign_string(
-            &string_to_sign,
-            creds.aws_secret_access_key(),
-            current_time,
-            &self.region.name(),
-            &self.service,
-        );
-        self.params.put("X-Amz-Signature", signature);
-
-        format!(
-            "{}://{}{}?{}",
-            self.scheme(),
-            hostname,
-            self.canonical_uri,
-            build_canonical_query_string(&self.params)
-        )
-    }
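    // ---- editorial sketch, not part of the original diff ----
    // How a caller would presign an S3 GET with the method above; the
    // credential values are placeholders.
    #[allow(dead_code)]
    fn _presign_sketch() {
        let creds = AwsCredentials::new("AKIDEXAMPLE", "secret", None, None);
        let mut req = SignedRequest::new("GET", "s3", &Region::UsEast1, "/bucket/key");
        // URL valid for 10 minutes; payload left unsigned (UNSIGNED-PAYLOAD):
        let url = req.generate_presigned_url(&creds, &std::time::Duration::from_secs(600), false);
        debug!("presigned: {}", url);
    }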
-    /// Signs the request using Amazon Signature version 4 to verify identity.
-    /// Authorization header uses AWS4-HMAC-SHA256 for signing.
-    pub fn sign(&mut self, creds: &AwsCredentials) {
-        self.sign_with_plus(creds, false)
-    }
-
-    /// Signs the request using Amazon Signature version 4 to verify identity.
-    /// Authorization header uses AWS4-HMAC-SHA256 for signing.
-    pub fn sign_with_plus(&mut self, creds: &AwsCredentials, should_treat_plus_literally: bool) {
-        debug!("Creating request to send to AWS.");
-        let hostname = match self.hostname {
-            Some(ref h) => h.to_string(),
-            None => build_hostname(&self.service, &self.region),
-        };
-
-        // Gotta remove and re-add headers since by default they append the value. If we're following
-        // a 307 redirect we end up with Three Stooges in the headers with duplicate values.
-        self.remove_header("host");
-        self.add_header("host", &hostname);
-
-        if let Some(ref token) = *creds.token() {
-            self.remove_header("X-Amz-Security-Token");
-            self.add_header("X-Amz-Security-Token", token);
-        }
-
-        self.canonical_query_string =
-            build_canonical_query_string_with_plus(&self.params, should_treat_plus_literally);
-
-        let date = now_utc();
-        self.remove_header("x-amz-date");
-        self.add_header(
-            "x-amz-date",
-            &date.strftime("%Y%m%dT%H%M%SZ").unwrap().to_string(),
-        );
-
-        // if there's no content-type header set, set it to the default value
-        if let Entry::Vacant(entry) = self.headers.entry("content-type".to_owned()) {
-            let mut values = Vec::new();
-            values.push(b"application/octet-stream".to_vec());
-            entry.insert(values);
-        }
-
-        // build the canonical request
-        let signed_headers = signed_headers(&self.headers);
-        self.canonical_uri = canonical_uri(&self.path, &self.region);
-        // Normalize URI paths according to RFC 3986. Remove redundant and relative path
-        // components. Each path segment must be URI-encoded twice (except for Amazon S3
-        // which only gets URI-encoded once).
-        // see https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-        let canonical_uri = if &self.service != "s3" {
-            utf8_percent_encode(&self.canonical_uri, StrictPathEncodeSet).collect::<String>()
-        } else {
-            self.canonical_uri.clone()
-        };
-        let canonical_headers = canonical_headers(&self.headers);
-
-        let canonical_request: String;
-
-        let (digest, len) = match self.payload {
-            None => {
-                canonical_request = format!(
-                    "{}\n{}\n{}\n{}\n{}\n{}",
-                    &self.method,
-                    canonical_uri,
-                    self.canonical_query_string,
-                    canonical_headers,
-                    signed_headers,
-                    EMPTY_SHA256_HASH
-                );
-                (Some(Cow::Borrowed(EMPTY_SHA256_HASH)), Some(0))
-            }
-            Some(SignedRequestPayload::Buffer(ref payload)) => {
-                let (digest, len) = digest_payload(&payload);
-                canonical_request = format!(
-                    "{}\n{}\n{}\n{}\n{}\n{}",
-                    &self.method,
-                    canonical_uri,
-                    self.canonical_query_string,
-                    canonical_headers,
-                    signed_headers,
-                    &digest
-                );
-                (Some(Cow::Owned(digest)), Some(len))
-            }
-            Some(SignedRequestPayload::Stream(ref stream)) => {
-                canonical_request = format!(
-                    "{}\n{}\n{}\n{}\n{}\n{}",
-                    &self.method,
-                    canonical_uri,
-                    self.canonical_query_string,
-                    canonical_headers,
-                    signed_headers,
-                    UNSIGNED_PAYLOAD
-                );
-                (Some(Cow::Borrowed(UNSIGNED_PAYLOAD)), stream.size_hint())
-            }
-        };
-
-        if let Some(digest) = digest {
-            self.remove_header("x-amz-content-sha256");
-            self.add_header("x-amz-content-sha256", &digest);
-        }
-
-        if let Some(len) = len {
-            self.remove_header("content-length");
-            self.add_header("content-length", &format!("{}", len));
-        }
-
-        // use the hashed canonical request to build the string to sign
-        let hashed_canonical_request = to_hexdigest(&canonical_request);
-        let scope = format!(
-            "{}/{}/{}/aws4_request",
-            date.strftime("%Y%m%d").unwrap(),
-            self.region.name(),
-            &self.service
-        );
-        let string_to_sign = string_to_sign(date, &hashed_canonical_request, &scope);
-
-        // sign the string
-        let signature = sign_string(
-            &string_to_sign,
-            creds.aws_secret_access_key(),
-            date,
-            &self.region.name(),
-            &self.service,
-        );
-
-        // build the actual auth header
-        let auth_header = format!(
-            "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
-            &creds.aws_access_key_id(),
-            scope,
-            signed_headers,
-            signature
-        );
-        self.remove_header("authorization");
-        self.add_header("authorization", &auth_header);
-    }
-}
-
-/// Convert payload from Char array to usable format.
-fn digest_payload(payload: &[u8]) -> (String, usize) {
-    let digest = to_hexdigest(payload);
-    let len = payload.len();
-    (digest, len)
-}
-
-#[inline]
-fn hmac(secret: &[u8], message: &[u8]) -> Hmac<Sha256> {
-    let mut hmac = Hmac::<Sha256>::new(secret).expect("failed to create hmac");
-    hmac.input(message);
-    hmac
-}
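// ---- editorial aside, not part of the original diff ----
// The SigV4 signing key is derived by chaining the `hmac` helper above,
// which is exactly what `sign_string` below does:
//
//     kDate    = HMAC("AWS4" + secret, yyyymmdd)
//     kRegion  = HMAC(kDate, region)
//     kService = HMAC(kRegion, service)
//     kSigning = HMAC(kService, "aws4_request")
//
// and the final signature is hex(HMAC(kSigning, string_to_sign)).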
-/// Takes a message and signs it using AWS secret, time, region keys and service keys.
-fn sign_string(
-    string_to_sign: &str,
-    secret: &str,
-    date: Tm,
-    region: &str,
-    service: &str,
-) -> String {
-    let date_str = date.strftime("%Y%m%d").unwrap().to_string();
-    let date_hmac = hmac(format!("AWS4{}", secret).as_bytes(), date_str.as_bytes())
-        .result()
-        .code();
-    let region_hmac = hmac(date_hmac.as_ref(), region.as_bytes()).result().code();
-    let service_hmac = hmac(region_hmac.as_ref(), service.as_bytes())
-        .result()
-        .code();
-    let signing_hmac = hmac(service_hmac.as_ref(), b"aws4_request").result().code();
-    hex::encode(
-        hmac(signing_hmac.as_ref(), string_to_sign.as_bytes())
-            .result()
-            .code()
-            .as_ref(),
-    )
-}
-
-/// Mark string as AWS4-HMAC-SHA256 hashed
-pub fn string_to_sign(date: Tm, hashed_canonical_request: &str, scope: &str) -> String {
-    format!(
-        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
-        date.strftime("%Y%m%dT%H%M%SZ").unwrap(),
-        scope,
-        hashed_canonical_request
-    )
-}
-
-fn signed_headers(headers: &BTreeMap<String, Vec<Vec<u8>>>) -> String {
-    let mut signed = String::new();
-    headers
-        .iter()
-        .filter(|&(ref key, _)| !skipped_headers(&key))
-        .for_each(|(key, _)| {
-            if !signed.is_empty() {
-                signed.push(';');
-            }
-            signed.push_str(key);
-        });
-    signed
-}
-
-/// Canonicalizes headers into the AWS Canonical Form.
-///
-/// Read more about it: [HERE](http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html)
-fn canonical_headers(headers: &BTreeMap<String, Vec<Vec<u8>>>) -> String {
-    let mut canonical = String::new();
-
-    for (key, value) in headers.iter() {
-        if skipped_headers(key) {
-            continue;
-        }
-        canonical.push_str(format!("{}:{}\n", key, canonical_values(value)).as_ref());
-    }
-    canonical
-}
-
-/// Canonicalizes values into the AWS Canonical Form.
-///
-/// Read more about it: [HERE](http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html)
-fn canonical_values(values: &[Vec<u8>]) -> String {
-    let mut st = String::new();
-    for v in values {
-        let s = str::from_utf8(v).unwrap();
-        if !st.is_empty() {
-            st.push(',')
-        }
-        if s.starts_with('\"') {
-            st.push_str(s);
-        } else {
-            st.push_str(s.replace("  ", " ").trim());
-        }
-    }
-    st
-}
-
-fn skipped_headers(header: &str) -> bool {
-    ["authorization", "content-length", "user-agent"].contains(&header)
-}
-
-/// Returns standardised URI
-fn canonical_uri(path: &str, region: &Region) -> String {
-    let endpoint_path = match region {
-        Region::Custom { ref endpoint, .. } => extract_endpoint_path(endpoint),
-        _ => None,
-    };
-    match (endpoint_path, path) {
-        (Some(prefix), "") => prefix.to_string(),
-        (None, "") => "/".to_string(),
-        (Some(prefix), _) => encode_uri_path(&(prefix.to_owned() + path)),
-        _ => encode_uri_path(path),
-    }
-}
-
-/// Canonicalizes query while iterating through the given parameters
-///
-/// Read more about it: [HERE](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html#query-string-auth-v4-signing)
-fn build_canonical_query_string(params: &Params) -> String {
-    build_canonical_query_string_with_plus(params, false)
-}
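// ---- editorial sketch, not part of the original diff ----
// Canonicalization in action (mirrors the unit tests at the bottom of this
// file): spaces and reserved characters are strictly percent-encoded, and a
// literal '+' is treated as an encoded space unless the caller opts out via
// build_canonical_query_string_with_plus.
#[allow(dead_code)]
fn _query_canonicalization_sketch() {
    let mut params = Params::new();
    params.insert("example param".to_owned(), Some("example+key".to_owned()));
    // the key's space and the value's '+' both end up as %20:
    assert_eq!(
        build_canonical_query_string(&params),
        "example%20param=example%20key"
    );
}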
-/// Canonicalizes query while iterating through the given parameters.
-///
-/// Read more about it: [HERE](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html#query-string-auth-v4-signing)
-fn build_canonical_query_string_with_plus(
-    params: &Params,
-    should_treat_plus_literally: bool,
-) -> String {
-    if params.is_empty() {
-        return String::new();
-    }
-
-    let mut output = String::new();
-    for (key, val) in params.iter() {
-        if !output.is_empty() {
-            output.push_str("&");
-        }
-        if should_treat_plus_literally {
-            output.push_str(&encode_uri_strict(&key));
-        } else {
-            output.push_str(&encode_uri_strict(&key.replace("+", " ")));
-        }
-        output.push_str("=");
-
-        if let Some(ref unwrapped_val) = *val {
-            if should_treat_plus_literally {
-                output.push_str(&encode_uri_strict(&unwrapped_val));
-            } else {
-                output.push_str(&encode_uri_strict(&unwrapped_val.replace("+", " ")));
-            }
-        }
-    }
-
-    output
-}
-
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// Do not URI-encode any of the unreserved characters that RFC 3986 defines:
-// A-Z, a-z, 0-9, hyphen ( - ), underscore ( _ ), period ( . ), and tilde ( ~ ).
-//
-// Percent-encode all other characters with %XY, where X and Y are hexadecimal
-// characters (0-9 and uppercase A-F). For example, the space character must be
-// encoded as %20 (not using '+', as some encoding schemes do) and extended UTF-8
-// characters must be in the form %XY%ZA%BC
-#[derive(Clone)]
-/// This struct is used to maintain the strict URI encoding standard as proposed by RFC 3986
-pub struct StrictEncodeSet;
-
-impl EncodeSet for StrictEncodeSet {
-    #[inline]
-    fn contains(&self, byte: u8) -> bool {
-        let upper = byte >= 0x41 && byte <= 0x5a;
-        let lower = byte >= 0x61 && byte <= 0x7a;
-        let numeric = byte >= 0x30 && byte <= 0x39;
-        let hyphen = byte == 0x2d;
-        let underscore = byte == 0x5f;
-        let tilde = byte == 0x7e;
-        let period = byte == 0x2e;
-        !(upper || lower || numeric || hyphen || underscore || tilde || period)
-    }
-}
-
-#[derive(Clone)]
-/// This struct is used to maintain the URI path encoding
-pub struct StrictPathEncodeSet;
-
-impl EncodeSet for StrictPathEncodeSet {
-    #[inline]
-    fn contains(&self, byte: u8) -> bool {
-        let slash = byte == b'/';
-        !slash && StrictEncodeSet.contains(byte)
-    }
-}
-
-#[inline]
-#[doc(hidden)]
-pub fn encode_uri_path(uri: &str) -> String {
-    utf8_percent_encode(uri, StrictPathEncodeSet).collect::<String>()
-}
-
-#[inline]
-fn encode_uri_strict(uri: &str) -> String {
-    utf8_percent_encode(&decode_uri(uri), StrictEncodeSet).collect::<String>()
-}
-
-#[inline]
-#[doc(hidden)]
-pub fn decode_uri(uri: &str) -> String {
-    let decoder = percent_decode(uri.as_bytes());
-    if let Ok(decoded) = decoder.decode_utf8() {
-        decoded.to_string()
-    } else {
-        uri.to_owned()
-    }
-}
-
-fn to_hexdigest<T: AsRef<[u8]>>(t: T) -> String {
-    let h = Sha256::digest(t.as_ref());
-    hex::encode(h.as_ref())
-}
-
-fn extract_endpoint_path(endpoint: &str) -> Option<&str> {
-    extract_endpoint_components(endpoint).1
-}
-
-fn extract_endpoint_components(endpoint: &str) -> (&str, Option<&str>) {
-    let unschemed = endpoint
-        .find("://")
-        .map(|p| &endpoint[p + 3..])
-        .unwrap_or(endpoint);
-    unschemed
-        .find('/')
-        .map(|p| (&unschemed[..p], Some(&unschemed[p..])))
-        .unwrap_or((unschemed, None))
-}
-
-fn extract_hostname(endpoint: &str) -> &str {
-    extract_endpoint_components(endpoint).0
-}
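// ---- editorial sketch, not part of the original diff ----
// What the strict encode sets above imply in practice: only RFC 3986
// unreserved characters survive unencoded, and the path set additionally
// lets '/' through.
#[allow(dead_code)]
fn _strict_encoding_sketch() {
    assert_eq!(encode_uri_strict("a b+c"), "a%20b%2Bc");
    assert_eq!(encode_uri_path("/path with spaces"), "/path%20with%20spaces");
}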
-/// Takes a `Region` enum and a service and forms a valid DNS name.
-/// E.g. `Region::ApNortheast1` and `s3` produces `s3-ap-northeast-1.amazonaws.com`
-fn build_hostname(service: &str, region: &Region) -> String {
-    //iam & cloudfront have only 1 endpoint, other services have region-based endpoints
-    match service {
-        "iam" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            Region::CnNorth1 | Region::CnNorthwest1 => {
-                format!("{}.{}.amazonaws.com.cn", service, region.name())
-            }
-            _ => format!("{}.amazonaws.com", service),
-        },
-        "chime" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            _ => format!("service.{}.aws.amazon.com", service),
-        },
-        "cloudfront" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            _ => format!("{}.amazonaws.com", service),
-        },
-        "importexport" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            _ => "importexport.amazonaws.com".to_owned(),
-        },
-        "s3" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            Region::UsEast1 => "s3.amazonaws.com".to_string(),
-            Region::CnNorth1 | Region::CnNorthwest1 => {
-                format!("s3.{}.amazonaws.com.cn", region.name())
-            }
-            _ => format!("s3-{}.amazonaws.com", region.name()),
-        },
-        "route53" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            _ => "route53.amazonaws.com".to_owned(),
-        },
-        "sdb" => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            Region::UsEast1 => "sdb.amazonaws.com".to_string(),
-            _ => format!("sdb.{}.amazonaws.com", region.name()),
-        },
-        _ => match *region {
-            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
-            Region::CnNorth1 | Region::CnNorthwest1 => {
-                format!("{}.{}.amazonaws.com.cn", service, region.name())
-            }
-            _ => format!("{}.{}.amazonaws.com", service, region.name()),
-        },
-    }
-}
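// ---- editorial sketch, not part of the original diff ----
// Hostname resolution for a few service/region combinations, following the
// match arms above (the first case is asserted by `get_hostname_none_present`
// in the tests below):
#[allow(dead_code)]
fn _build_hostname_sketch() {
    assert_eq!(
        build_hostname("sqs", &Region::UsEast1),
        "sqs.us-east-1.amazonaws.com"
    );
    assert_eq!(build_hostname("s3", &Region::UsEast1), "s3.amazonaws.com");
    assert_eq!(
        build_hostname("route53", &Region::UsEast1),
        "route53.amazonaws.com"
    );
}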
-#[cfg(test)]
-mod tests {
-    use futures::Future;
-    use std::collections::BTreeMap;
-    use time::empty_tm;
-
-    use crate::credential::{ProfileProvider, ProvideAwsCredentials};
-    use crate::param::Params;
-    use crate::Region;
-
-    use super::{build_canonical_query_string, SignedRequest};
-
-    #[test]
-    fn get_hostname_none_present() {
-        let request = SignedRequest::new("POST", "sqs", &Region::UsEast1, "/");
-        assert_eq!("sqs.us-east-1.amazonaws.com", request.hostname());
-    }
-
-    #[test]
-    fn path_percent_encoded() {
-        let provider = ProfileProvider::with_configuration(
-            "test_resources/multiple_profile_credentials",
-            "foo",
-        );
-        let mut request = SignedRequest::new(
-            "GET",
-            "s3",
-            &Region::UsEast1,
-            "/path with spaces: the sequel",
-        );
-        request.sign(provider.credentials().wait().as_ref().unwrap());
-        assert_eq!(
-            "/path%20with%20spaces%3A%20the%20sequel",
-            request.canonical_uri()
-        );
-    }
-    #[test]
-    fn query_encoding_escaped_chars() {
-        query_encoding_escaped_chars_range(0u8, 45u8); // \0 to '-'
-        query_encoding_escaped_chars_range(47u8, 48u8); // '/' to '0'
-        query_encoding_escaped_chars_range(58u8, 65u8); // ':' to 'A'
-        query_encoding_escaped_chars_range(91u8, 95u8); // '[' to '_'
-        query_encoding_escaped_chars_range(96u8, 97u8); // '`' to 'a'
-        query_encoding_escaped_chars_range(123u8, 126u8); // '{' to '~'
-        query_encoding_escaped_chars_range(127u8, 128u8); // DEL
-    }
-    fn query_encoding_escaped_chars_range(start: u8, end: u8) {
-        let mut params = Params::new();
-        for code in start..end {
-            params.insert("k".to_owned(), Some((code as char).to_string()));
-            let enc = build_canonical_query_string(&params);
-            let expected = if (code as char) == '+' {
-                "k=%20".to_owned()
-            } else {
-                format!("k=%{:02X}", code)
-            };
-            assert_eq!(expected, enc);
-        }
-    }
-    #[test]
-    fn query_string_encoding_outliers() {
-        let mut request = SignedRequest::new(
-            "GET",
-            "s3",
-            &Region::UsEast1,
-            "/pathwith%20already%20existing%20encoding and some not encoded values",
-        );
-        request.add_param("arg1%7B", "arg1%7B");
-        request.add_param("arg2%7B+%2B", "+%2B");
-        assert_eq!(
-            super::build_canonical_query_string(&request.params),
-            "arg1%7B=arg1%7B&arg2%7B%20%2B=%20%2B"
-        );
-        assert_eq!(
-            super::canonical_uri(&request.path, &Region::default()),
-            "/pathwith%2520already%2520existing%2520encoding%20and%20some%20not%20encoded%20values"
-        );
-    }
-    #[test]
-    fn query_percent_encoded() {
-        let mut request = SignedRequest::new(
-            "GET",
-            "s3",
-            &Region::UsEast1,
-            "/path with spaces: the sequel++",
-        );
-        request.add_param(
-            "key:with@funny&characters",
-            "value with/funny%characters/Рускии",
-        );
-        let canonical_query_string = super::build_canonical_query_string(&request.params);
-        assert_eq!("key%3Awith%40funny%26characters=value%20with%2Ffunny%25characters%2F%D0%A0%D1%83%D1%81%D0%BA%D0%B8%D0%B8",
-                   canonical_query_string);
-        let canonical_uri_string = super::canonical_uri(&request.path, &Region::default());
-        assert_eq!(
-            "/path%20with%20spaces%3A%20the%20sequel%2B%2B",
-            canonical_uri_string
-        );
-    }
-
-    #[test]
-    fn signature_generation() {
-        let signature_foo = super::sign_string(
-            "foo",
-            "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
-            empty_tm(),
-            "us-west-1",
-            "s3",
-        );
-        assert_eq!(
-            signature_foo,
-            "29673d1d856a7684ff6f0f53c542bae0bfbb1e564f531aff7568be9fd206383b".to_string()
-        );
-        let signature_bar = super::sign_string(
-            "bar",
-            "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
-            empty_tm(),
-            "us-west-1",
-            "s3",
-        );
-        assert_eq!(
-            signature_bar,
-            "2ba6879cd9e769d73df721dc90aafdaa843005d23f5b6c91d0744f804962e44f".to_string()
-        );
-    }
-
-    #[test]
-    fn signed_headers_unsigned_first() {
-        let mut headers = BTreeMap::new();
-
-        // This header is excluded from signing
-        headers.insert("content-length".to_owned(), vec![vec![]]);
-
-        headers.insert("content-type".to_owned(), vec![vec![]]);
-        headers.insert("x-amz-date".to_owned(), vec![vec![]]);
-        assert_eq!(super::signed_headers(&headers), "content-type;x-amz-date");
-    }
-
-    #[test]
-    fn signed_headers_unsigned_in_center() {
-        let mut headers = BTreeMap::new();
-        headers.insert("cache-control".to_owned(), vec![vec![]]);
-
-        // This header is excluded from signing
-        headers.insert("content-length".to_owned(), vec![vec![]]);
-
-        headers.insert("content-type".to_owned(), vec![vec![]]);
-        headers.insert("host".to_owned(), vec![vec![]]);
-        headers.insert("x-amz-date".to_owned(), vec![vec![]]);
-
-        assert_eq!(
-            super::signed_headers(&headers),
-            "cache-control;content-type;host;x-amz-date"
-        );
-    }
-
-    #[test]
-    fn signed_headers_unsigned_last() {
-        let mut headers = BTreeMap::new();
-        headers.insert("cache-control".to_owned(), vec![vec![]]);
-
-        // This header is excluded from signing
-        headers.insert("content-length".to_owned(), vec![vec![]]);
-
-        assert_eq!(super::signed_headers(&headers), "cache-control");
-    }
-
-    #[test]
-    fn canonical_uri_combos() {
-        assert_eq!(super::canonical_uri("", &Region::default()), "/");
-        assert_eq!(super::canonical_uri("/foo", &Region::default()), "/foo");
-        assert_eq!(
-            super::canonical_uri(
-                "",
-                &Region::Custom {
-                    name: Region::UsEast1.name().into(),
-                    endpoint: "http://localhost:8000/path".into()
-                }
-            ),
-            "/path"
-        );
-        assert_eq!(
-            super::canonical_uri(
-                "/foo",
-                &Region::Custom {
-                    name: Region::UsEast1.name().into(),
-                    endpoint: "http://localhost:8000/path".into()
-                }
-            ),
-            "/path/foo"
-        );
-        assert_eq!(
-            super::canonical_uri(
-                "/foo",
-                &Region::Custom {
-                    name: Region::UsEast1.name().into(),
-                    endpoint: "http://localhost:8000".into()
-                }
-            ),
-            "/foo"
-        );
-    }
-
-    #[test]
-    fn extract_hostname() {
-        assert_eq!(
-            super::extract_hostname("hostname.with.no.scheme"),
-            "hostname.with.no.scheme"
-        );
-        assert_eq!(
-            super::extract_hostname("http://hostname.with.scheme"),
-            "hostname.with.scheme"
-        );
-        assert_eq!(
-            super::extract_hostname("https://hostname.with.scheme"),
-            "hostname.with.scheme"
-        );
-
-        assert_eq!(
-            super::extract_hostname("hostname.with.no.scheme/test"),
-            "hostname.with.no.scheme"
-        );
-        assert_eq!(
-            super::extract_hostname("http://hostname.with.scheme/test"),
-            "hostname.with.scheme"
-        );
-        assert_eq!(
-            super::extract_hostname("https://hostname.with.scheme/test"),
-            "hostname.with.scheme"
-        );
-    }
-}
+// moved to rusoto_signature
+pub use rusoto_signature::signature::*;
diff --git a/rusoto/core/src/stream.rs b/rusoto/core/src/stream.rs
index cc89639db72..3d2ce69c0fc 100644
--- a/rusoto/core/src/stream.rs
+++ b/rusoto/core/src/stream.rs
@@ -1,165 +1,2 @@
-use std::fmt;
-use std::io;
-
-use bytes::Bytes;
-use futures::{future, stream, Async, Future, Poll, Stream};
-use tokio::io::AsyncRead;
-
-/// Stream of bytes.
-pub struct ByteStream {
-    size_hint: Option<usize>,
-    inner: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>,
-}
-
-impl ByteStream {
-    /// Create a new `ByteStream` by wrapping a `futures` stream.
-    pub fn new<S>(stream: S) -> ByteStream
-    where
-        S: Stream<Item = Bytes, Error = io::Error> + Send + 'static,
-    {
-        ByteStream {
-            size_hint: None,
-            inner: Box::new(stream),
-        }
-    }
-
-    pub(crate) fn size_hint(&self) -> Option<usize> {
-        self.size_hint
-    }
-
-    /// Return an implementation of `AsyncRead` that uses async i/o to consume the stream.
-    pub fn into_async_read(self) -> impl AsyncRead + Send {
-        ImplAsyncRead::new(self.inner)
-    }
-
-    /// Return an implementation of `Read` that uses blocking i/o to consume the stream.
-    pub fn into_blocking_read(self) -> impl io::Read + Send {
-        ImplBlockingRead::new(self.inner)
-    }
-}
-
-impl From<Vec<u8>> for ByteStream {
-    fn from(buf: Vec<u8>) -> ByteStream {
-        ByteStream {
-            size_hint: Some(buf.len()),
-            inner: Box::new(stream::once(Ok(Bytes::from(buf)))),
-        }
-    }
-}
-
-impl fmt::Debug for ByteStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "<ByteStream size_hint={:?}>", self.size_hint)
-    }
-}
-
-impl Stream for ByteStream {
-    type Item = Bytes;
-    type Error = io::Error;
-
-    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
-        self.inner.poll()
-    }
-}
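// ---- editorial sketch, not part of the original diff ----
// The `From<Vec<u8>>` impl above yields a buffered stream with a known size,
// while `ByteStream::new` wraps an arbitrary stream with no size hint:
#[allow(dead_code)]
fn _byte_stream_sketch() {
    let buffered = ByteStream::from(b"hello".to_vec());
    assert_eq!(buffered.size_hint(), Some(5));

    let chunks = vec![Bytes::from_static(b"he"), Bytes::from_static(b"llo")];
    let streamed = ByteStream::new(stream::iter_ok(chunks));
    assert_eq!(streamed.size_hint(), None);
}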
-struct ImplAsyncRead {
-    buffer: io::Cursor<Bytes>,
-    stream: stream::Fuse<Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>>,
-}
-
-impl ImplAsyncRead {
-    fn new(stream: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>) -> Self {
-        ImplAsyncRead {
-            buffer: io::Cursor::new(Bytes::new()),
-            stream: stream.fuse(),
-        }
-    }
-}
-
-impl io::Read for ImplAsyncRead {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        if buf.is_empty() {
-            return Ok(0);
-        }
-        loop {
-            let n = self.buffer.read(buf)?;
-            if n > 0 {
-                return Ok(n);
-            }
-            match self.stream.poll()? {
-                Async::NotReady => {
-                    return Err(io::ErrorKind::WouldBlock.into());
-                }
-                Async::Ready(Some(buffer)) => {
-                    self.buffer = io::Cursor::new(buffer);
-                    continue;
-                }
-                Async::Ready(None) => {
-                    return Ok(0);
-                }
-            }
-        }
-    }
-}
-
-impl AsyncRead for ImplAsyncRead {}
-
-struct ImplBlockingRead {
-    inner: ImplAsyncRead,
-}
-
-impl ImplBlockingRead {
-    fn new(stream: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>) -> Self {
-        ImplBlockingRead {
-            inner: ImplAsyncRead::new(stream),
-        }
-    }
-}
-
-impl io::Read for ImplBlockingRead {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        future::poll_fn(|| self.inner.poll_read(buf)).wait()
-    }
-}
-
-#[test]
-fn test_async_read() {
-    use bytes::Bytes;
-    use std::io::Read;
-
-    let chunks = vec![Bytes::from_static(b"1234"), Bytes::from_static(b"5678")];
-    let stream = ByteStream::new(stream::iter_ok(chunks));
-    let mut async_read = stream.into_async_read();
-
-    let mut buf = [0u8; 3];
-    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
-    assert_eq!(&buf[..3], b"123");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
-    assert_eq!(&buf[..1], b"4");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
-    assert_eq!(&buf[..3], b"567");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
-    assert_eq!(&buf[..1], b"8");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 0);
-}
-
-#[test]
-fn test_blocking_read() {
-    use bytes::Bytes;
-    use std::io::Read;
-
-    let chunks = vec![Bytes::from_static(b"1234"), Bytes::from_static(b"5678")];
-    let stream = ByteStream::new(stream::iter_ok(chunks));
-    let mut async_read = stream.into_blocking_read();
-
-    let mut buf = [0u8; 3];
-    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
-    assert_eq!(&buf[..3], b"123");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
-    assert_eq!(&buf[..1], b"4");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
-    assert_eq!(&buf[..3], b"567");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
-    assert_eq!(&buf[..1], b"8");
-    assert_eq!(async_read.read(&mut buf).unwrap(), 0);
-}
+// moved to rusoto_signature
+pub use rusoto_signature::stream::*;
diff --git a/rusoto/credential/Cargo.toml b/rusoto/credential/Cargo.toml
index ba662f894d0..59bdea19952 100644
--- a/rusoto/credential/Cargo.toml
+++ b/rusoto/credential/Cargo.toml
@@ -11,13 +11,13 @@ documentation = "https://docs.rs/rusoto_credential"
 name = "rusoto_credential"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.1"
 exclude = ["tests/sample-data/*"]
 edition = "2018"
 
 [badges]
-appveyor = { repository = "rusoto/rusoto", branch = "master" }
-azure-devops = { project = "Rusoto", pipeline = "rusoto.rusoto", build="1" }
+appveyor = { repository = "matthewkmayer/rusoto", branch = "master" }
+azure-devops = { project = "matthewkmayer/Rusoto", pipeline = "rusoto.rusoto", build="1" }
 
 [dependencies]
 chrono = { version = "0.4.0", features = ["serde"] }
@@ -31,10 +31,13 @@ serde_derive = "1.0.2"
 shlex = "0.1.1"
 tokio-process = "0.2.3"
 tokio-timer = "0.2.6"
+lazy_static = "1.4.0"
 
 [dev-dependencies]
-lazy_static = "1.0"
 quickcheck = "0.6"
+tokio-core = "0.1"
+rusoto_core = { path = "../core" }
+rusoto_s3 = { path = "../services/s3" }
 
 [dependencies.clippy]
 optional = true
diff --git a/rusoto/credential/src/container.rs b/rusoto/credential/src/container.rs
index b8e8d6bce09..d95a79f2346 100644
--- a/rusoto/credential/src/container.rs
+++ b/rusoto/credential/src/container.rs
@@ -170,13 +170,13 @@ fn new_request(uri: &str, env_var_name: &str) -> Result<Request<Body>, Credentia
 #[cfg(test)]
 mod tests {
     use super::*;
-    use
crate::test_utils::{lock, ENV_MUTEX}; + use crate::test_utils::lock_env; use std::env; #[test] fn request_from_relative_uri() { let path = "/xxx"; - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI, path); env::set_var(AWS_CONTAINER_CREDENTIALS_FULL_URI, "dummy"); env::set_var(AWS_CONTAINER_AUTHORIZATION_TOKEN, "dummy"); @@ -192,7 +192,7 @@ mod tests { #[test] fn error_from_missing_env_vars() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI); env::remove_var(AWS_CONTAINER_CREDENTIALS_FULL_URI); let result = request_from_env_vars(); @@ -201,7 +201,7 @@ mod tests { #[test] fn error_from_empty_env_vars() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI, ""); env::set_var(AWS_CONTAINER_CREDENTIALS_FULL_URI, ""); env::set_var(AWS_CONTAINER_AUTHORIZATION_TOKEN, ""); @@ -215,7 +215,7 @@ mod tests { #[test] fn request_from_full_uri_with_token() { let url = "http://localhost/xxx"; - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI); env::set_var(AWS_CONTAINER_CREDENTIALS_FULL_URI, url); env::set_var(AWS_CONTAINER_AUTHORIZATION_TOKEN, "dummy"); @@ -231,7 +231,7 @@ mod tests { #[test] fn request_from_full_uri_without_token() { let url = "http://localhost/xxx"; - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI); env::set_var(AWS_CONTAINER_CREDENTIALS_FULL_URI, url); env::remove_var(AWS_CONTAINER_AUTHORIZATION_TOKEN); @@ -246,7 +246,7 @@ mod tests { #[test] fn request_from_full_uri_with_empty_token() { let url = "http://localhost/xxx"; - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI); env::set_var(AWS_CONTAINER_CREDENTIALS_FULL_URI, url); env::set_var(AWS_CONTAINER_AUTHORIZATION_TOKEN, ""); diff --git a/rusoto/credential/src/environment.rs b/rusoto/credential/src/environment.rs index c3079f0ae04..1692b46c75b 100644 --- a/rusoto/credential/src/environment.rs +++ b/rusoto/credential/src/environment.rs @@ -42,6 +42,7 @@ use crate::{non_empty_env_var, AwsCredentials, CredentialsError, ProvideAwsCrede /// env::set_var("AWS_ACCESS_KEY_ID", "ANTN35UAENTS5UIAEATD"); /// env::set_var("AWS_SECRET_ACCESS_KEY", "TtnuieannGt2rGuie2t8Tt7urarg5nauedRndrur"); /// env::set_var("AWS_SESSION_TOKEN", "DfnGs8Td4rT8r4srxAg6Td4rT8r4srxAg6GtkTir"); +/// env::remove_var("AWS_CREDENTIAL_EXPIRATION"); /// /// let creds = EnvironmentProvider::default().credentials().wait().unwrap(); /// @@ -191,7 +192,7 @@ fn get_critical_variable(var_name: String) -> Result { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{lock, ENV_MUTEX}; + use crate::test_utils::lock_env; use chrono::Utc; use std::env; @@ -206,7 +207,7 @@ mod tests { #[test] fn get_temporary_credentials_from_env() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_ACCESS_KEY_ID, "id"); env::set_var(AWS_SECRET_ACCESS_KEY, "secret"); env::set_var(AWS_SESSION_TOKEN, "token"); @@ -223,7 +224,7 @@ mod tests { #[test] fn get_non_temporary_credentials_from_env() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_ACCESS_KEY_ID, "id"); env::set_var(AWS_SECRET_ACCESS_KEY, "secret"); env::remove_var(AWS_SESSION_TOKEN); @@ -239,7 +240,7 @@ mod tests { #[test] fn environment_provider_missing_key_id() { - let _guard = lock(&ENV_MUTEX); 
+ let _guard = lock_env(); env::remove_var(AWS_ACCESS_KEY_ID); env::set_var(AWS_SECRET_ACCESS_KEY, "secret"); env::remove_var(AWS_SESSION_TOKEN); @@ -254,7 +255,7 @@ mod tests { #[test] fn environment_provider_missing_secret() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_SECRET_ACCESS_KEY); env::set_var(AWS_ACCESS_KEY_ID, "id"); env::remove_var(AWS_SESSION_TOKEN); @@ -269,7 +270,7 @@ mod tests { #[test] fn environment_provider_missing_credentials() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_SECRET_ACCESS_KEY); env::remove_var(AWS_ACCESS_KEY_ID); env::remove_var(AWS_SESSION_TOKEN); @@ -283,7 +284,7 @@ mod tests { #[test] fn environment_provider_bad_expiration() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_ACCESS_KEY_ID, "id"); env::set_var(AWS_SECRET_ACCESS_KEY, "secret"); env::set_var(AWS_SESSION_TOKEN, "token"); @@ -302,7 +303,7 @@ mod tests { #[test] fn get_temporary_credentials_with_expiration_from_env() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let now = Utc::now(); let now_str = now.to_rfc3339(); env::set_var(AWS_ACCESS_KEY_ID, "id"); @@ -324,7 +325,7 @@ mod tests { #[test] fn regression_test_rfc_3339_compat() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); // RFC 3339 expiration times with lower case 't' could not be parsed by earlier // implementations. env::set_var(AWS_CREDENTIAL_EXPIRATION, "1996-12-19t16:39:57-08:00"); @@ -346,7 +347,7 @@ mod tests { // NOTE: not strictly neccessary here, since we are using a non-standard // prefix, so we shouldn't collide with the other env interactions in // the other tests. - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let now = Utc::now(); let now_str = now.to_rfc3339(); diff --git a/rusoto/credential/src/instance_metadata.rs b/rusoto/credential/src/instance_metadata.rs index 6dd3deda63e..c44f7cb0dfa 100644 --- a/rusoto/credential/src/instance_metadata.rs +++ b/rusoto/credential/src/instance_metadata.rs @@ -19,7 +19,7 @@ const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-crede /// The provider has a default timeout of 30 seconds. While it should work well for most setups, /// you can change the timeout using the `set_timeout` method. /// -/// # Example +/// # Examples /// /// ```rust /// extern crate rusoto_credential; @@ -36,10 +36,29 @@ const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-crede /// // ... /// } /// ``` +/// +/// The source location can be changed from the default of 169.254.169.254: +/// +/// ```rust +/// extern crate rusoto_credential; +/// +/// use std::time::Duration; +/// +/// use rusoto_credential::InstanceMetadataProvider; +/// +/// fn main() { +/// let mut provider = InstanceMetadataProvider::new(); +/// // you can overwrite the default endpoint like this: +/// provider.set_ip_addr_with_port("127.0.0.1", "8080"); +/// +/// // ... +/// } +/// ``` #[derive(Clone, Debug)] pub struct InstanceMetadataProvider { client: HttpClient, timeout: Duration, + metadata_ip_addr: String, } impl InstanceMetadataProvider { @@ -48,6 +67,7 @@ impl InstanceMetadataProvider { InstanceMetadataProvider { client: HttpClient::new(), timeout: Duration::from_secs(30), + metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(), } } @@ -55,6 +75,11 @@ impl InstanceMetadataProvider { pub fn set_timeout(&mut self, timeout: Duration) { self.timeout = timeout; } + + /// Allow overriding host and port of instance metadata service. 
+ pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) { + self.metadata_ip_addr = format!("{}:{}", ip, port.to_string()); + } } impl Default for InstanceMetadataProvider { @@ -75,6 +100,7 @@ pub struct InstanceMetadataProviderFuture { state: InstanceMetadataFutureState, client: HttpClient, timeout: Duration, + metadata_ip_addr: String, } impl Future for InstanceMetadataProviderFuture { @@ -84,12 +110,18 @@ impl Future for InstanceMetadataProviderFuture { fn poll(&mut self) -> Poll { let new_state = match self.state { InstanceMetadataFutureState::Start => { - let new_future = get_role_name(&self.client, self.timeout)?; + let new_future = + get_role_name(&self.client, self.timeout, self.metadata_ip_addr.clone())?; InstanceMetadataFutureState::GetRoleName(new_future) } InstanceMetadataFutureState::GetRoleName(ref mut future) => { let role_name = try_ready!(future.poll()); - let new_future = get_credentials_from_role(&self.client, self.timeout, &role_name)?; + let new_future = get_credentials_from_role( + &self.client, + self.timeout, + &role_name, + self.metadata_ip_addr.clone(), + )?; InstanceMetadataFutureState::GetCredentialsFromRole(new_future) } InstanceMetadataFutureState::GetCredentialsFromRole(ref mut future) => { @@ -114,6 +146,7 @@ impl ProvideAwsCredentials for InstanceMetadataProvider { state: InstanceMetadataFutureState::Start, client: self.client.clone(), timeout: self.timeout, + metadata_ip_addr: self.metadata_ip_addr.clone(), } } } @@ -122,11 +155,9 @@ impl ProvideAwsCredentials for InstanceMetadataProvider { fn get_role_name( client: &HttpClient, timeout: Duration, + ip_addr: String, ) -> Result { - let role_name_address = format!( - "http://{}/{}/", - AWS_CREDENTIALS_PROVIDER_IP, AWS_CREDENTIALS_PROVIDER_PATH - ); + let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH); let uri = match role_name_address.parse::() { Ok(u) => u, Err(e) => return Err(CredentialsError::new(e)), @@ -140,10 +171,11 @@ fn get_credentials_from_role( client: &HttpClient, timeout: Duration, role_name: &str, + ip_addr: String, ) -> Result { let credentials_provider_url = format!( "http://{}/{}/{}", - AWS_CREDENTIALS_PROVIDER_IP, AWS_CREDENTIALS_PROVIDER_PATH, role_name + ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name ); let uri = match credentials_provider_url.parse::() { diff --git a/rusoto/credential/src/lib.rs b/rusoto/credential/src/lib.rs index 65cbc8b5ca9..af32c10b419 100644 --- a/rusoto/credential/src/lib.rs +++ b/rusoto/credential/src/lib.rs @@ -50,15 +50,49 @@ use futures::future::{err, Either, Shared, SharedItem}; use futures::{Async, Future, Poll}; use hyper::Error as HyperError; +/// Representation of anonymity +pub trait Anonymous { + /// Return true if a type is anonymous, false otherwise + fn is_anonymous(&self) -> bool; +} + +impl Anonymous for AwsCredentials { + fn is_anonymous(&self) -> bool { + self.aws_access_key_id().is_empty() && self.aws_secret_access_key().is_empty() + } +} + /// AWS API access credentials, including access key, secret key, token (for IAM profiles), /// expiration timestamp, and claims from federated login. -#[derive(Clone, Deserialize)] +/// +/// # Anonymous example +/// +/// Some AWS services, like [s3](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html) +/// do not require authenticated credential identity. 
For these +/// cases you can use a default set which are considered anonymous +/// +/// ```rust,2018edition +/// use rusoto_core::request::HttpClient; +/// use rusoto_s3::S3Client; +/// use rusoto_credential::{StaticProvider, AwsCredentials}; +/// # use std::error::Error; +/// +/// # fn main() -> Result<(), Box> { +/// let s3 = S3Client::new_with( +/// HttpClient::new()?, +/// StaticProvider::from(AwsCredentials::default()), +/// Default::default() +/// ); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Deserialize, Default)] pub struct AwsCredentials { #[serde(rename = "AccessKeyId")] key: String, #[serde(rename = "SecretAccessKey")] secret: String, - #[serde(rename = "Token")] + #[serde(rename = "SessionToken", alias = "Token")] token: Option, #[serde(rename = "Expiration")] expires_at: Option>, @@ -544,11 +578,21 @@ mod tests { use std::io::Read; use std::path::Path; - use crate::test_utils::{is_secret_hidden_behind_asterisks, lock, ENV_MUTEX, SECRET}; + use crate::test_utils::{is_secret_hidden_behind_asterisks, lock_env, SECRET}; use futures::Future; use super::*; + #[test] + fn default_empty_credentials_are_considered_anonymous() { + assert!(AwsCredentials::default().is_anonymous()) + } + + #[test] + fn credentials_with_values_are_not_considered_anonymous() { + assert!(!AwsCredentials::new("foo", "bar", None, None).is_anonymous()) + } + #[test] fn providers_are_send_and_sync() { fn is_send_and_sync() {} @@ -568,7 +612,7 @@ mod tests { #[test] fn profile_provider_finds_right_credentials_in_file() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let profile_provider = ProfileProvider::with_configuration( "tests/sample-data/multiple_profile_credentials", "foo", @@ -615,6 +659,7 @@ mod tests { credentials.aws_secret_access_key(), "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" ); + assert!(credentials.token().is_some()); assert_eq!( credentials.expires_at().expect(""), diff --git a/rusoto/credential/src/profile.rs b/rusoto/credential/src/profile.rs index 79b73750a65..0a2f2726aef 100644 --- a/rusoto/credential/src/profile.rs +++ b/rusoto/credential/src/profile.rs @@ -1,7 +1,6 @@ //! The Credentials Provider for Credentials stored in a profile inside of a Credentials file. use std::collections::HashMap; -use std::fs; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::{Path, PathBuf}; @@ -10,6 +9,7 @@ use std::process::Command; use dirs::home_dir; use futures::future::{result, FutureResult}; use futures::{Future, Poll}; +use lazy_static::lazy_static; use regex::Regex; use tokio_process::{CommandExt, OutputAsync}; @@ -21,6 +21,11 @@ const AWS_SHARED_CREDENTIALS_FILE: &str = "AWS_SHARED_CREDENTIALS_FILE"; const DEFAULT: &str = "default"; const REGION: &str = "region"; +lazy_static! { + static ref PROFILE_REGEX: Regex = + Regex::new(r"^\[(profile )?([^\]]+)\]$").expect("Failed to compile regex"); +} + /// Provides AWS credentials from a profile in a credentials file, or from a credential process. /// /// # Warning @@ -207,7 +212,7 @@ impl ProvideAwsCredentials for ProfileProvider { let inner = match ProfileProvider::default_config_location().map(|location| { parse_config_file(&location).and_then(|config| { config - .get(&ProfileProvider::default_profile_name()) + .get(&self.profile) .and_then(|props| props.get("credential_process")) .map(std::borrow::ToOwned::to_owned) }) @@ -256,21 +261,14 @@ fn parse_credential_process_output(v: &[u8]) -> Result Regex { - Regex::new(r"^\[(profile )?([^\]]+)\]$").expect("Failed to compile regex") -} +fn parse_config_file

(file_path: P) -> Option>> +where + P: AsRef, +{ + if !file_path.as_ref().exists() || !file_path.as_ref().is_file() { + return None; + } -fn parse_config_file(file_path: &Path) -> Option>> { - match fs::metadata(file_path) { - Err(_) => return None, - Ok(metadata) => { - if !metadata.is_file() { - return None; - } - } - }; - let profile_regex = new_profile_regex(); let file = File::open(file_path).expect("expected file"); let file_lines = BufReader::new(&file); let result: (HashMap>, Option) = file_lines @@ -279,11 +277,11 @@ fn parse_config_file(file_path: &Path) -> Option Option -fn parse_credentials_file( - file_path: &Path, -) -> Result, CredentialsError> { - match fs::metadata(file_path) { - Err(_) => { - return Err(CredentialsError::new(format!( - "Couldn't stat credentials file: [ {:?} ]. Non existant, or no permission.", - file_path +fn parse_credentials_file

( + file_path: P, +) -> Result, CredentialsError> +where + P: AsRef, +{ + let profiles: HashMap<_, _> = parse_config_file(&file_path) + .map(|data| { + Ok(data + .into_iter() + .filter_map(|(profile, properties)| { + if let (Some(key), Some(secret)) = ( + properties.get("aws_access_key_id"), + properties.get("aws_secret_access_key"), + ) { + Some(( + profile, + AwsCredentials::new( + key, + secret, + properties + .get("aws_session_token") + .or_else(|| properties.get("aws_security_token")) + .map(String::to_owned), + None, + ), + )) + } else { + None + } + }) + .collect()) + }) + .unwrap_or_else(|| { + Err(CredentialsError::new(format!( + "Invalid credentials file: [ {} ].", + file_path.as_ref().display() ))) - } - Ok(metadata) => { - if !metadata.is_file() { - return Err(CredentialsError::new(format!( - "Credentials file: [ {:?} ] is not a file.", - file_path - ))); - } - } - }; - - let file = File::open(file_path)?; - - let profile_regex = new_profile_regex(); - let mut profiles: HashMap = HashMap::new(); - let mut access_key: Option = None; - let mut secret_key: Option = None; - let mut token: Option = None; - let mut profile_name: Option = None; - - let file_lines = BufReader::new(&file); - for (line_no, line) in file_lines.lines().enumerate() { - let unwrapped_line: String = - line.unwrap_or_else(|_| panic!("Failed to read credentials file, line: {}", line_no)); - - // skip empty lines - if unwrapped_line.is_empty() { - continue; - } - - // skip comments - if unwrapped_line.starts_with('#') { - continue; - } - - // handle the opening of named profile blocks - if profile_regex.is_match(&unwrapped_line) { - if profile_name.is_some() && access_key.is_some() && secret_key.is_some() { - let creds = - AwsCredentials::new(access_key.unwrap(), secret_key.unwrap(), token, None); - profiles.insert(profile_name.unwrap(), creds); - } - - access_key = None; - secret_key = None; - token = None; - - let caps = profile_regex.captures(&unwrapped_line).unwrap(); - profile_name = Some(caps.get(2).unwrap().as_str().to_string()); - continue; - } - - // otherwise look for key=value pairs we care about - let lower_case_line = unwrapped_line.to_ascii_lowercase().to_string(); - - if lower_case_line.contains("aws_access_key_id") && access_key.is_none() { - let v: Vec<&str> = unwrapped_line.split('=').collect(); - if !v.is_empty() { - access_key = Some(v[1].trim_matches(' ').to_string()); - } - } else if lower_case_line.contains("aws_secret_access_key") && secret_key.is_none() { - let v: Vec<&str> = unwrapped_line.split('=').collect(); - if !v.is_empty() { - secret_key = Some(v[1].trim_matches(' ').to_string()); - } - } else if lower_case_line.contains("aws_session_token") && token.is_none() { - let v: Vec<&str> = unwrapped_line.split('=').collect(); - if !v.is_empty() { - token = Some(v[1].trim_matches(' ').to_string()); - } - } else if lower_case_line.contains("aws_security_token") { - if token.is_none() { - let v: Vec<&str> = unwrapped_line.split('=').collect(); - if !v.is_empty() { - token = Some(v[1].trim_matches(' ').to_string()); - } - } - } else { - // Ignore unrecognized fields - continue; - } - } - - if profile_name.is_some() && access_key.is_some() && secret_key.is_some() { - let creds = AwsCredentials::new(access_key.unwrap(), secret_key.unwrap(), token, None); - profiles.insert(profile_name.unwrap(), creds); - } + })?; if profiles.is_empty() { - return Err(CredentialsError::new("No credentials found.")); + Err(CredentialsError::new("No credentials found.")) + } else { + Ok(profiles) } - - 
Ok(profiles) } fn parse_command_str(s: &str) -> Result { @@ -430,7 +371,7 @@ mod tests { use std::path::Path; use super::*; - use crate::test_utils::{lock, ENV_MUTEX}; + use crate::test_utils::lock_env; use crate::{CredentialsError, ProvideAwsCredentials}; #[test] @@ -466,6 +407,7 @@ mod tests { .expect("No bar profile in multiple_profile_credentials"); assert_eq!(bar_profile.get(REGION), Some(&"us-east-4".to_string())); assert_eq!(bar_profile.get("output"), Some(&"json".to_string())); + assert_eq!(bar_profile.get("# comments"), None); } #[test] @@ -474,7 +416,7 @@ mod tests { super::parse_config_file(Path::new("tests/sample-data/credential_process_config")); assert!(result.is_some()); let profiles = result.unwrap(); - assert_eq!(profiles.len(), 1); + assert_eq!(profiles.len(), 2); let default_profile = profiles .get(DEFAULT) .expect("No Default profile in default_profile_credentials"); @@ -543,7 +485,7 @@ mod tests { #[test] fn profile_provider_happy_path() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let provider = ProfileProvider::with_configuration( "tests/sample-data/multiple_profile_credentials", "foo", @@ -559,7 +501,7 @@ mod tests { #[test] fn profile_provider_via_environment_variable() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let credentials_path = "tests/sample-data/default_profile_credentials"; env::set_var(AWS_SHARED_CREDENTIALS_FILE, credentials_path); let result = ProfileProvider::new(); @@ -571,7 +513,7 @@ mod tests { #[test] fn profile_provider_profile_name_via_environment_variable() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let credentials_path = "tests/sample-data/multiple_profile_credentials"; env::set_var(AWS_SHARED_CREDENTIALS_FILE, credentials_path); env::set_var(AWS_PROFILE, "bar"); @@ -587,7 +529,7 @@ mod tests { #[test] fn profile_provider_bad_profile() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let provider = ProfileProvider::with_configuration( "tests/sample-data/multiple_profile_credentials", "not_a_profile", @@ -603,7 +545,7 @@ mod tests { #[test] fn profile_provider_credential_process() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var( AWS_CONFIG_FILE, "tests/sample-data/credential_process_config", @@ -616,12 +558,43 @@ mod tests { let creds = result.ok().unwrap(); assert_eq!(creds.aws_access_key_id(), "baz_access_key"); assert_eq!(creds.aws_secret_access_key(), "baz_secret_key"); + assert_eq!( + creds.token().as_ref().expect("session token not parsed"), + "baz_session_token" + ); + assert!(creds.expires_at().is_some()); + + env::remove_var(AWS_CONFIG_FILE); + } + + #[test] + fn profile_provider_credential_process_foo() { + let _guard = lock_env(); + env::set_var( + AWS_CONFIG_FILE, + "tests/sample-data/credential_process_config", + ); + let mut provider = ProfileProvider::new().unwrap(); + provider.set_profile("foo"); + let result = provider.credentials().wait(); + + assert!(result.is_ok()); + + let creds = result.ok().unwrap(); + assert_eq!(creds.aws_access_key_id(), "foo_access_key"); + assert_eq!(creds.aws_secret_access_key(), "foo_secret_key"); + assert_eq!( + creds.token().as_ref().expect("session token not parsed"), + "foo_session_token" + ); + assert!(creds.expires_at().is_some()); + env::remove_var(AWS_CONFIG_FILE); } #[test] fn profile_provider_profile_name() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); let mut provider = ProfileProvider::new().unwrap(); assert_eq!(DEFAULT, provider.profile()); provider.set_profile("foo"); @@ 
-639,22 +612,22 @@ mod tests { #[test] fn parse_credentials_bad_path() { - let result = super::parse_credentials_file(Path::new("/bad/file/path")); + let result = super::parse_credentials_file("/bad/file/path"); assert_eq!( result.err(), Some(CredentialsError::new( - "Couldn\'t stat credentials file: [ \"/bad/file/path\" ]. Non existant, or no permission.", + "Invalid credentials file: [ /bad/file/path ].", )) ); } #[test] fn parse_credentials_directory_path() { - let result = super::parse_credentials_file(Path::new("tests/")); + let result = super::parse_credentials_file("tests/"); assert_eq!( result.err(), Some(CredentialsError::new( - "Credentials file: [ \"tests/\" ] is not a file.", + "Invalid credentials file: [ tests/ ].", )) ); } @@ -678,7 +651,7 @@ mod tests { #[test] fn default_profile_name_from_env_var() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_PROFILE, "bar"); assert_eq!("bar", ProfileProvider::default_profile_name()); env::remove_var(AWS_PROFILE); @@ -686,7 +659,7 @@ mod tests { #[test] fn default_profile_name_from_empty_env_var() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_PROFILE, ""); assert_eq!(DEFAULT, ProfileProvider::default_profile_name()); env::remove_var(AWS_PROFILE); @@ -694,14 +667,14 @@ mod tests { #[test] fn default_profile_name() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_PROFILE); assert_eq!(DEFAULT, ProfileProvider::default_profile_name()); } #[test] fn default_profile_location_from_env_var() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_SHARED_CREDENTIALS_FILE, "bar"); assert_eq!( Ok(PathBuf::from("bar")), @@ -712,7 +685,7 @@ mod tests { #[test] fn default_profile_location_from_empty_env_var() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::set_var(AWS_SHARED_CREDENTIALS_FILE, ""); assert_eq!( ProfileProvider::hardcoded_profile_location(), @@ -723,12 +696,11 @@ mod tests { #[test] fn default_profile_location() { - let _guard = lock(&ENV_MUTEX); + let _guard = lock_env(); env::remove_var(AWS_SHARED_CREDENTIALS_FILE); assert_eq!( ProfileProvider::hardcoded_profile_location(), ProfileProvider::default_profile_location() ); } - } diff --git a/rusoto/credential/src/static_provider.rs b/rusoto/credential/src/static_provider.rs index 5a8ef8440c0..fd88a704548 100644 --- a/rusoto/credential/src/static_provider.rs +++ b/rusoto/credential/src/static_provider.rs @@ -77,6 +77,12 @@ impl ProvideAwsCredentials for StaticProvider { } } +impl From for StaticProvider { + fn from(credentials: AwsCredentials) -> Self { + StaticProvider{ credentials, valid_for: None } + } +} + #[cfg(test)] mod tests { use futures::Future; @@ -87,6 +93,13 @@ mod tests { use crate::test_utils::{is_secret_hidden_behind_asterisks, SECRET}; use crate::ProvideAwsCredentials; + #[test] + fn static_provider_impl_from_for_awscredentials() { + let provider = StaticProvider::from(AwsCredentials::default()); + assert_eq!(provider.get_aws_access_key_id(), ""); + assert_eq!(*provider.is_valid_for(), None); + } + #[test] fn test_static_provider_creation() { let result = StaticProvider::new( diff --git a/rusoto/credential/src/test_utils.rs b/rusoto/credential/src/test_utils.rs index 24d14890ad8..2b9c5fb61b8 100644 --- a/rusoto/credential/src/test_utils.rs +++ b/rusoto/credential/src/test_utils.rs @@ -1,5 +1,7 @@ #![cfg(test)] +use std::collections::HashMap; +use std::ffi::OsString; use std::fmt::Debug; use std::sync::{Mutex, MutexGuard}; @@ -13,18 
+15,35 @@ where !debug.contains(SECRET) && debug.contains("**********") } -// cargo runs tests in parallel, which leads to race conditions when changing -// environment variables. Therefore we use a global mutex for all tests which -// rely on environment variables. -lazy_static! { - pub static ref ENV_MUTEX: Mutex<()> = Mutex::new(()); -} +// cargo runs tests in parallel, which leads to race conditions when changing environment +// variables. Therefore we use a global mutex for all tests which rely on environment variables. +// +// As failed (panic) tests will poison the global mutex, we use a helper which recovers from +// poisoned mutex. +// +// The first time the helper is called it stores the original environment. If the lock is poisoned, +// the environment is reset to the original state. +pub fn lock_env() -> MutexGuard<'static, ()> { + lazy_static! { + static ref ENV_MUTEX: Mutex<()> = Mutex::new(()); + static ref ORIGINAL_ENVIRONMENT: HashMap = + std::env::vars_os().collect(); + } -// As failed (panic) tests will poison the global mutex, we use a helper which -// recovers from poisoned mutex. -pub fn lock<'a, T>(mutex: &'a Mutex) -> MutexGuard<'a, T> { - match mutex.lock() { + let guard = ENV_MUTEX.lock(); + lazy_static::initialize(&ORIGINAL_ENVIRONMENT); + match guard { Ok(guard) => guard, - Err(poisoned) => poisoned.into_inner(), + Err(poisoned) => { + for (name, _) in std::env::vars_os() { + if !ORIGINAL_ENVIRONMENT.contains_key(&name) { + std::env::remove_var(name); + } + } + for (name, value) in ORIGINAL_ENVIRONMENT.iter() { + std::env::set_var(name, value); + } + poisoned.into_inner() + } } } diff --git a/rusoto/credential/tests/instance-profile-test.rs b/rusoto/credential/tests/instance-profile-test.rs new file mode 100644 index 00000000000..54ee75a304b --- /dev/null +++ b/rusoto/credential/tests/instance-profile-test.rs @@ -0,0 +1,32 @@ +extern crate tokio_core; + +use rusoto_credential::{InstanceMetadataProvider, ProvideAwsCredentials}; +use std::time::Duration; +use tokio_core::reactor::Core; + +// This test is marked ignored because it requires special setup. +// It's run with the `credential_integration_test` Makefile target. 
+#[test] +#[ignore] +fn it_fetches_basic_role() { + // set env vars to point to local provider + let mut provider = InstanceMetadataProvider::new(); + provider.set_timeout(Duration::from_secs(5)); + provider.set_ip_addr_with_port("127.0.0.1", "8080"); + + let creds_future = provider.credentials(); + let mut core = Core::new().unwrap(); + let creds = match core.run(creds_future) { + Ok(creds) => creds, + Err(e) => panic!("Got error: {:?}", e), + }; + + assert_eq!(creds.aws_access_key_id(), "Access_key_id_value"); + assert_eq!(creds.aws_secret_access_key(), "Secret_access_key_value"); + assert_eq!(creds.token().as_ref(), Some(&"AAAAA".to_string())); + let dt = match creds.expires_at().as_ref() { + Some(d) => d.to_string(), + None => panic!("Expiration should be present"), + }; + assert_eq!(dt, "2015-08-04 06:32:37 UTC"); +} diff --git a/rusoto/credential/tests/sample-data/credential_process_config b/rusoto/credential/tests/sample-data/credential_process_config index b46587f4003..0f19bdcacfe 100644 --- a/rusoto/credential/tests/sample-data/credential_process_config +++ b/rusoto/credential/tests/sample-data/credential_process_config @@ -1,3 +1,7 @@ [default] credential_process = cat tests/sample-data/credential_process_sample_response region = us-east-2 + +[profile foo] +credential_process = cat tests/sample-data/credential_process_sample_response_foo +region = us-east-2 diff --git a/rusoto/credential/tests/sample-data/credential_process_sample_response b/rusoto/credential/tests/sample-data/credential_process_sample_response index 1bdf449127a..4444498e2cd 100644 --- a/rusoto/credential/tests/sample-data/credential_process_sample_response +++ b/rusoto/credential/tests/sample-data/credential_process_sample_response @@ -1 +1 @@ -{"Version":1,"AccessKeyId":"baz_access_key","SecretAccessKey":"baz_secret_key"} +{"Version":1,"AccessKeyId":"baz_access_key","SecretAccessKey":"baz_secret_key","SessionToken":"baz_session_token","Expiration":"2019-03-21T01:23:45+00:00"} diff --git a/rusoto/credential/tests/sample-data/credential_process_sample_response_foo b/rusoto/credential/tests/sample-data/credential_process_sample_response_foo new file mode 100644 index 00000000000..b0aa07ab1f1 --- /dev/null +++ b/rusoto/credential/tests/sample-data/credential_process_sample_response_foo @@ -0,0 +1 @@ +{"Version":1,"AccessKeyId":"foo_access_key","SecretAccessKey":"foo_secret_key","SessionToken":"foo_session_token","Expiration":"2019-03-21T01:23:45+00:00"} diff --git a/rusoto/credential/tests/sample-data/multiple_profile_config b/rusoto/credential/tests/sample-data/multiple_profile_config index b949e26de67..3f7243920fd 100644 --- a/rusoto/credential/tests/sample-data/multiple_profile_config +++ b/rusoto/credential/tests/sample-data/multiple_profile_config @@ -4,6 +4,9 @@ region = us-east-2 [profile foo] region = us-east-3 output = json + [profile bar] region = us-east-4 -output = json \ No newline at end of file +output = json +# comments = comments +# comments \ No newline at end of file diff --git a/rusoto/credential_service_mock/Cargo.toml b/rusoto/credential_service_mock/Cargo.toml new file mode 100644 index 00000000000..8d750677e04 --- /dev/null +++ b/rusoto/credential_service_mock/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "credential_service_mock" +version = "0.1.0" +authors = ["Matthew Mayer "] +edition = "2018" + +[dependencies] +warp = "0.1" diff --git a/rusoto/credential_service_mock/run-and-test.sh b/rusoto/credential_service_mock/run-and-test.sh new file mode 100755 index 00000000000..2079c79f867 --- 
/dev/null +++ b/rusoto/credential_service_mock/run-and-test.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +cargo build; cargo run & + +cd ../credential ; sleep 5 ; cargo test -- --ignored ; killall credential_service_mock diff --git a/rusoto/credential_service_mock/src/main.rs b/rusoto/credential_service_mock/src/main.rs new file mode 100644 index 00000000000..4489e9ebd67 --- /dev/null +++ b/rusoto/credential_service_mock/src/main.rs @@ -0,0 +1,21 @@ +use warp::{self, path, Filter}; + +fn main() { + let instance_profile_role = path!("latest" / "meta-data" / "iam" / "security-credentials") + .map(|| "testrole"); + let instance_profile_creds = path!("latest" / "meta-data" / "iam" / "security-credentials" / "testrole") + .map(|| r#"{ + "Code" : "Success", + "LastUpdated" : "2015-08-04T00:09:23Z", + "Type" : "AWS-HMAC", + "AccessKeyId" : "Access_key_id_value", + "SecretAccessKey" : "Secret_access_key_value", + "Token" : "AAAAA", + "Expiration" : "2015-08-04T06:32:37Z" +}"#); + + let routes = warp::get2().and(instance_profile_creds).or(instance_profile_role); + + warp::serve(routes) + .run(([127, 0, 0, 1], 8080)); +} \ No newline at end of file diff --git a/rusoto/services/acm-pca/Cargo.toml b/rusoto/services/acm-pca/Cargo.toml index e163d6e1549..7734429e640 100644 --- a/rusoto/services/acm-pca/Cargo.toml +++ b/rusoto/services/acm-pca/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_acm_pca" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/acm-pca/README.md b/rusoto/services/acm-pca/README.md index 7b58c908048..12ff484c875 100644 --- a/rusoto/services/acm-pca/README.md +++ b/rusoto/services/acm-pca/README.md @@ -23,9 +23,16 @@ To use `rusoto_acm_pca` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_acm_pca = "0.40.0" +rusoto_acm_pca = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/acm-pca/src/custom/mod.rs b/rusoto/services/acm-pca/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/acm-pca/src/custom/mod.rs +++ b/rusoto/services/acm-pca/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/acm-pca/src/generated.rs b/rusoto/services/acm-pca/src/generated.rs index f39acd967d1..829da7adc92 100644 --- a/rusoto/services/acm-pca/src/generated.rs +++ b/rusoto/services/acm-pca/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -85,9 +84,9 @@ pub struct ASN1Subject { pub title: Option, } -///

Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority operation to create your private CA. You must then call the GetCertificateAuthorityCertificate operation to retrieve a private CA certificate signing request (CSR). Take the CSR to your on-premises CA and sign it with the root CA certificate or a subordinate certificate. Call the ImportCertificateAuthorityCertificate operation to import the signed certificate into AWS Certificate Manager (ACM).
+/// Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority action to create your private CA. You must then call the GetCertificateAuthorityCertificate action to retrieve a private CA certificate signing request (CSR). Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA certificate. Call the ImportCertificateAuthorityCertificate action to import the signed certificate into AWS Certificate Manager (ACM).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CertificateAuthority { ///

Amazon Resource Name (ARN) for your private certificate authority (CA). The format is 12345678-1234-1234-1234-123456789012 .

#[serde(rename = "Arn")] @@ -117,7 +116,7 @@ pub struct CertificateAuthority { #[serde(rename = "NotBefore")] #[serde(skip_serializing_if = "Option::is_none")] pub not_before: Option, - ///

The period during which a deleted CA can be restored. For more information, see the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthorityRequest operation.
+ /// The period during which a deleted CA can be restored. For more information, see the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthorityRequest action.

#[serde(rename = "RestorableUntil")] #[serde(skip_serializing_if = "Option::is_none")] pub restorable_until: Option, @@ -139,10 +138,10 @@ pub struct CertificateAuthority { pub type_: Option, } -///

Contains configuration information for your private certificate authority (CA). This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate. It also includes the signature algorithm that it uses when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority operation.
+/// Contains configuration information for your private certificate authority (CA). This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate. It also includes the signature algorithm that it uses when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority action.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CertificateAuthorityConfiguration { - ///

Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate.
+ /// Type of the public key algorithm and size, in bits, of the key pair that your CA creates when it issues a certificate. When you create a subordinate CA, you must use a key algorithm supported by the parent CA.

#[serde(rename = "KeyAlgorithm")] pub key_algorithm: String, ///

Name of the algorithm your private CA uses to sign certificate requests.

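To make the shape of this configuration concrete, here is a sketch of building a `CertificateAuthorityConfiguration`; the `subject` field and the `common_name` member of `ASN1Subject` (the struct shown earlier in this file) follow the service model and should be treated as assumptions, and the algorithm names are illustrative values.

```rust
let ca_config = CertificateAuthorityConfiguration {
    // Public key algorithm and key size for the CA's key pair; a subordinate
    // CA must pick an algorithm its parent CA supports.
    key_algorithm: "RSA_2048".to_owned(),
    // Algorithm the CA uses to sign the certificates it issues.
    signing_algorithm: "SHA256WITHRSA".to_owned(),
    // X.500 distinguished-name information for the CA itself.
    subject: ASN1Subject {
        common_name: Some("example.com".to_owned()),
        ..Default::default()
    },
};
```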
@@ -167,7 +166,7 @@ pub struct CreateCertificateAuthorityAuditReportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCertificateAuthorityAuditReportResponse { ///

An alphanumeric string that contains a report identifier.

#[serde(rename = "AuditReportId")] @@ -184,25 +183,25 @@ pub struct CreateCertificateAuthorityRequest { ///

Name and bit size of the private key algorithm, the name of the signing algorithm, and X.500 certificate subject information.

#[serde(rename = "CertificateAuthorityConfiguration")] pub certificate_authority_configuration: CertificateAuthorityConfiguration, - ///

The type of the certificate authority. Currently, this must be SUBORDINATE.
+ /// The type of the certificate authority.

#[serde(rename = "CertificateAuthorityType")] pub certificate_authority_type: String, - ///

Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM PCA recognizes that you are requesting only one certificate. As a result, ACM PCA issues only one. If you change the idempotency token for each call, however, ACM PCA recognizes that you are requesting multiple certificates.
+ /// Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM Private CA recognizes that you are requesting only one certificate. As a result, ACM Private CA issues only one. If you change the idempotency token for each call, however, ACM Private CA recognizes that you are requesting multiple certificates.

#[serde(rename = "IdempotencyToken")] #[serde(skip_serializing_if = "Option::is_none")] pub idempotency_token: Option, - ///

Contains a Boolean value that you can use to enable a certification revocation list (CRL) for the CA, the name of the S3 bucket to which ACM PCA will write the CRL, and an optional CNAME alias that you can use to hide the name of your bucket in the CRL Distribution Points extension of your CA certificate. For more information, see the CrlConfiguration structure.
+ /// Contains a Boolean value that you can use to enable a certification revocation list (CRL) for the CA, the name of the S3 bucket to which ACM Private CA will write the CRL, and an optional CNAME alias that you can use to hide the name of your bucket in the CRL Distribution Points extension of your CA certificate. For more information, see the CrlConfiguration structure.

#[serde(rename = "RevocationConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub revocation_configuration: Option, - ///

Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA.
+ /// Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA. For information about using tags with IAM to manage permissions, see Controlling Access Using IAM Tags.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCertificateAuthorityResponse { ///

If successful, the Amazon Resource Name (ARN) of the certificate authority (CA). This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

#[serde(rename = "CertificateAuthorityArn")] @@ -215,7 +214,7 @@ pub struct CreatePermissionRequest { ///

The actions that the specified AWS service principal can use. These include IssueCertificate, GetCertificate, and ListPermissions.

#[serde(rename = "Actions")] pub actions: Vec, - ///

The Amazon Resource Name (ARN) of the CA that grants the permissions. You can find the ARN by calling the ListCertificateAuthorities operation. This must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

+ ///

The Amazon Resource Name (ARN) of the CA that grants the permissions. You can find the ARN by calling the ListCertificateAuthorities action. This must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, ///

The AWS service or identity that receives the permission. At this time, the only valid principal is acm.amazonaws.com.

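Putting the documented fields together, a `CreatePermissionRequest` that grants ACM the three renewal-related permissions might look like this sketch (the ARN and account ID are placeholders):

```rust
let request = CreatePermissionRequest {
    actions: vec![
        "IssueCertificate".to_owned(),
        "GetCertificate".to_owned(),
        "ListPermissions".to_owned(),
    ],
    certificate_authority_arn: "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/12345678-1234-1234-1234-123456789012".to_owned(),
    // Currently the only principal the service accepts.
    principal: "acm.amazonaws.com".to_owned(),
    source_account: None,
};
```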
@@ -227,21 +226,21 @@ pub struct CreatePermissionRequest { pub source_account: Option, } -///

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to ACM PCA.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next update or when a certificate is revoked. When a certificate is revoked, it is recorded in the next CRL that is generated and in the next audit report. Only time valid certificates are listed in the CRL. Expired certificates are not included.

CRLs contain the following fields:

  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.

  • Signature Algorithm: The name of the algorithm used to sign the CRL.

  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.

  • Last Update: The issue date and time of this CRL.

  • Next Update: The day and time by which the next CRL will be issued.

  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.

    • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.

    • Revocation Date: Date and time the certificate was revoked.

    • CRL Entry Extensions: Optional extensions for the CRL entry.

      • X509v3 CRL Reason Code: Reason the certificate was revoked.

  • CRL Extensions: Optional extensions for the CRL.

    • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.

    • X509v3 CRL Number:: Decimal sequence number for the CRL.

  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.

  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by ACM PCA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

+///

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to ACM Private CA.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next update or when a certificate is revoked. When a certificate is revoked, it is recorded in the next CRL that is generated and in the next audit report. Only time valid certificates are listed in the CRL. Expired certificates are not included.

CRLs contain the following fields:

  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.

  • Signature Algorithm: The name of the algorithm used to sign the CRL.

  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.

  • Last Update: The issue date and time of this CRL.

  • Next Update: The day and time by which the next CRL will be issued.

  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.

    • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.

    • Revocation Date: Date and time the certificate was revoked.

    • CRL Entry Extensions: Optional extensions for the CRL entry.

      • X509v3 CRL Reason Code: Reason the certificate was revoked.

  • CRL Extensions: Optional extensions for the CRL.

    • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.

    • X509v3 CRL Number: Decimal sequence number for the CRL.

  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.

  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CrlConfiguration { ///

Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public.

#[serde(rename = "CustomCname")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_cname: Option, - ///

Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority operation or for an existing CA when you call the UpdateCertificateAuthority operation.
+ /// Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority action or for an existing CA when you call the UpdateCertificateAuthority action.

#[serde(rename = "Enabled")] pub enabled: bool, ///

Number of days until a certificate expires.

#[serde(rename = "ExpirationInDays")] #[serde(skip_serializing_if = "Option::is_none")] pub expiration_in_days: Option, - ///

Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority operation. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket.
+ /// Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority action. You must specify a bucket policy that allows ACM Private CA to write the CRL to your bucket.

#[serde(rename = "S3BucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub s3_bucket_name: Option, @@ -260,13 +259,13 @@ pub struct DeleteCertificateAuthorityRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeletePermissionRequest { - ///

The Amazon Resource Number (ARN) of the private CA that issued the permissions. You can find the CA's ARN by calling the ListCertificateAuthorities operation. This must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

+ ///

The Amazon Resource Number (ARN) of the private CA that issued the permissions. You can find the CA's ARN by calling the ListCertificateAuthorities action. This must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, ///

The AWS service or identity that will have its CA permissions revoked. At this time, the only valid service principal is acm.amazonaws.com

#[serde(rename = "Principal")] pub principal: String, - ///

The AWS account that calls this operation.
+ /// The AWS account that calls this action.

#[serde(rename = "SourceAccount")] #[serde(skip_serializing_if = "Option::is_none")] pub source_account: Option, @@ -274,7 +273,7 @@ pub struct DeletePermissionRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeCertificateAuthorityAuditReportRequest { - ///

The report ID returned by calling the CreateCertificateAuthorityAuditReport operation.
+ /// The report ID returned by calling the CreateCertificateAuthorityAuditReport action.

#[serde(rename = "AuditReportId")] pub audit_report_id: String, ///

The Amazon Resource Name (ARN) of the private CA. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

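A sketch of issuing this request against a client; `client` (an `AcmPcaClient`) and `ca_arn` are assumed to exist, the report ID is a placeholder, and `.sync()` blocks on the returned `RusotoFuture`:

```rust
let report = client
    .describe_certificate_authority_audit_report(DescribeCertificateAuthorityAuditReportRequest {
        audit_report_id: "11111111-2222-3333-4444-555555555555".to_owned(),
        certificate_authority_arn: ca_arn.clone(),
    })
    .sync()
    .expect("DescribeCertificateAuthorityAuditReport failed");
println!("report status: {:?}", report.audit_report_status);
```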
@@ -283,7 +282,7 @@ pub struct DescribeCertificateAuthorityAuditReportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCertificateAuthorityAuditReportResponse { ///

Specifies whether report creation is in progress, has succeeded, or has failed.

#[serde(rename = "AuditReportStatus")] @@ -311,7 +310,7 @@ pub struct DescribeCertificateAuthorityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCertificateAuthorityResponse { ///

A CertificateAuthority structure that contains information about your private CA.

#[serde(rename = "CertificateAuthority")] @@ -327,13 +326,13 @@ pub struct GetCertificateAuthorityCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCertificateAuthorityCertificateResponse { ///

Base64-encoded certificate authority (CA) certificate.

#[serde(rename = "Certificate")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate: Option, - ///

Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate.
+ /// Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. If this is a root CA, the value will be null.

#[serde(rename = "CertificateChain")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_chain: Option, @@ -341,13 +340,13 @@ pub struct GetCertificateAuthorityCertificateResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetCertificateAuthorityCsrRequest { - ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

+ ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCertificateAuthorityCsrResponse { ///

The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.

#[serde(rename = "Csr")] @@ -366,7 +365,7 @@ pub struct GetCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCertificateResponse { ///

The base64 PEM-encoded certificate specified by the CertificateArn parameter.

#[serde(rename = "Certificate")] @@ -380,7 +379,7 @@ pub struct GetCertificateResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ImportCertificateAuthorityCertificateRequest { - ///

The PEM-encoded certificate for your private CA. This must be signed by using your on-premises CA.
+ /// The PEM-encoded certificate for a private CA. This may be a self-signed certificate in the case of a root CA, or it may be signed by another CA that you control.

#[serde(rename = "Certificate")] #[serde( deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", @@ -391,14 +390,15 @@ pub struct ImportCertificateAuthorityCertificateRequest { ///

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, - ///

A PEM-encoded file that contains all of your certificates, other than the certificate you're importing, chaining up to your root CA. Your on-premises root certificate is the last in the chain, and each certificate in the chain signs the one preceding.

+ ///

A PEM-encoded file that contains all of your certificates, other than the certificate you're importing, chaining up to your root CA. Your ACM Private CA-hosted or on-premises root certificate is the last in the chain, and each certificate in the chain signs the one preceding.

This parameter must be supplied when you import a subordinate CA. When you import a root CA, there is no chain.

#[serde(rename = "CertificateChain")] #[serde( deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", default )] - pub certificate_chain: bytes::Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + pub certificate_chain: Option, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -414,20 +414,24 @@ pub struct IssueCertificateRequest { default )] pub csr: bytes::Bytes, - ///

Custom string that can be used to distinguish between calls to the IssueCertificate operation. Idempotency tokens time out after one hour. Therefore, if you call IssueCertificate multiple times with the same idempotency token within 5 minutes, ACM PCA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, PCA recognizes that you are requesting multiple certificates.
+ /// Custom string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens time out after one hour. Therefore, if you call IssueCertificate multiple times with the same idempotency token within 5 minutes, ACM Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, PCA recognizes that you are requesting multiple certificates.

#[serde(rename = "IdempotencyToken")] #[serde(skip_serializing_if = "Option::is_none")] pub idempotency_token: Option, ///

The name of the algorithm that will be used to sign the certificate to be issued.

#[serde(rename = "SigningAlgorithm")] pub signing_algorithm: String, + ///

Specifies a custom configuration template to use when issuing a certificate. If this parameter is not provided, ACM Private CA defaults to the EndEntityCertificate/V1 template.

The following service-owned TemplateArn values are supported by ACM Private CA:

  • arn:aws:acm-pca:::template/EndEntityCertificate/V1

  • arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1

  • arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1

  • arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen2/V1

  • arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1

  • arn:aws:acm-pca:::template/RootCACertificate/V1

For more information, see Using Templates.

+ #[serde(rename = "TemplateArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub template_arn: Option, ///

The type of the validity period.

#[serde(rename = "Validity")] pub validity: Validity, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IssueCertificateResponse { ///

The Amazon Resource Name (ARN) of the issued certificate and the certificate serial number. This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245

#[serde(rename = "CertificateArn")] @@ -448,7 +452,7 @@ pub struct ListCertificateAuthoritiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCertificateAuthoritiesResponse { ///

Summary information about each certificate authority you have created.

#[serde(rename = "CertificateAuthorities")] @@ -462,7 +466,7 @@ pub struct ListCertificateAuthoritiesResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListPermissionsRequest { - ///

The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities operation. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities operation.
+ /// The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities action.

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, ///

When paginating results, use this parameter to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

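The NextToken/MaxResults contract described here lends itself to a simple drain loop. A sketch, again assuming an `AcmPcaClient` named `client` and a CA ARN in `ca_arn`:

```rust
let mut next_token: Option<String> = None;
loop {
    let page = client
        .list_permissions(ListPermissionsRequest {
            certificate_authority_arn: ca_arn.clone(),
            max_results: Some(10),
            next_token: next_token.take(),
        })
        .sync()
        .expect("ListPermissions failed");
    for permission in page.permissions.unwrap_or_default() {
        println!("{:?}", permission.actions);
    }
    // An absent NextToken means the listing is complete.
    match page.next_token {
        Some(token) => next_token = Some(token),
        None => break,
    }
}
```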
@@ -476,7 +480,7 @@ pub struct ListPermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPermissionsResponse { ///

When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.

#[serde(rename = "NextToken")] @@ -490,7 +494,7 @@ pub struct ListPermissionsResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListTagsRequest { - ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

+ ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, ///

Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

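As an aside on the `cfg_attr` change repeated throughout this file: with the new `serialize_structs` feature enabled, response structs such as `ListTagsResponse` derive `Serialize` outside of test builds too, so callers can write them out directly. A minimal sketch, assuming `serde_json` is a dependency of the calling crate:

```rust
// Compiles when rusoto_acm_pca is built with `features = ["serialize_structs"]`
// (or under cfg(test)); otherwise the struct does not implement Serialize.
let response = ListTagsResponse::default();
let as_json = serde_json::to_string(&response).expect("serialization failed");
println!("{}", as_json);
```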
@@ -504,7 +508,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.

#[serde(rename = "NextToken")] @@ -516,11 +520,11 @@ pub struct ListTagsResponse { pub tags: Option>, } -///

Permissions designate which private CA operations can be performed by an AWS service or entity. In order for ACM to automatically renew private certificates, you must give the ACM service principal all available permissions (IssueCertificate, GetCertificate, and ListPermissions). Permissions can be assigned with the CreatePermission operation, removed with the DeletePermission operation, and listed with the ListPermissions operation.
+/// Permissions designate which private CA actions can be performed by an AWS service or entity. In order for ACM to automatically renew private certificates, you must give the ACM service principal all available permissions (IssueCertificate, GetCertificate, and ListPermissions). Permissions can be assigned with the CreatePermission action, removed with the DeletePermission action, and listed with the ListPermissions action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Permission { - ///

The private CA operations that can be performed by the designated AWS service.
+ /// The private CA actions that can be performed by the designated AWS service.

#[serde(rename = "Actions")] #[serde(skip_serializing_if = "Option::is_none")] pub actions: Option>, @@ -548,12 +552,12 @@ pub struct Permission { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RestoreCertificateAuthorityRequest { - ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

+ ///

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, } -///

Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority operations. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate.
+/// Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority actions. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RevocationConfiguration { ///

Configuration of the certificate revocation list (CRL), if any, maintained by your private CA.

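Tying the CRL fields documented above together, a sketch of a revocation configuration that enables a CRL, refreshed at least weekly, written to an S3 bucket (bucket name and CNAME are placeholders):

```rust
let revocation = RevocationConfiguration {
    crl_configuration: Some(CrlConfiguration {
        enabled: true,
        expiration_in_days: Some(7),
        s3_bucket_name: Some("my-crl-bucket".to_owned()),
        // Optional alias that keeps the bucket name out of issued certificates.
        custom_cname: Some("crl.example.com".to_owned()),
    }),
};
```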
@@ -567,7 +571,7 @@ pub struct RevokeCertificateRequest { ///

Amazon Resource Name (ARN) of the private CA that issued the certificate to be revoked. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateAuthorityArn")] pub certificate_authority_arn: String, - ///

Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate operation retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number.

openssl x509 -in file_path -text -noout

You can also copy the serial number from the console or use the DescribeCertificate operation in the AWS Certificate Manager API Reference.

+ ///

Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate action retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number.

openssl x509 -in file_path -text -noout

You can also copy the serial number from the console or use the DescribeCertificate action in the AWS Certificate Manager API Reference.

#[serde(rename = "CertificateSerial")] pub certificate_serial: String, ///

Specifies why you revoked the certificate.

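A complete revocation request then looks like the following sketch; the ARN, the hexadecimal serial number (as extracted with the openssl command above), and the reason are placeholder values:

```rust
let request = RevokeCertificateRequest {
    certificate_authority_arn: "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/12345678-1234-1234-1234-123456789012".to_owned(),
    // Hexadecimal serial number copied from the `openssl x509 -text` output.
    certificate_serial: "e8cbd2bedb122329f97706bcfec990f8".to_owned(),
    revocation_reason: "KEY_COMPROMISE".to_owned(),
};
```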
@@ -575,7 +579,7 @@ pub struct RevokeCertificateRequest { pub revocation_reason: String, } -///

Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority operation. To remove a tag, call the UntagCertificateAuthority operation.
+/// Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority action. To remove a tag, call the UntagCertificateAuthority action.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { ///

Key (name) of the tag.

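For example, a tag pairing a key with an optional value (the `value` field is assumed to be an `Option` per the "optional value" wording above):

```rust
let tag = Tag {
    key: "Environment".to_owned(),
    // The value half of the pair may be omitted entirely with None.
    value: Some("production".to_owned()),
};
```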
@@ -622,7 +626,7 @@ pub struct UpdateCertificateAuthorityRequest { pub status: Option, } -///

Length of time for which the certificate issued by your private certificate authority (CA), or by the private CA itself, is valid in days, months, or years. You can issue a certificate by calling the IssueCertificate operation.
+/// Length of time for which the certificate issued by your private certificate authority (CA), or by the private CA itself, is valid in days, months, or years. You can issue a certificate by calling the IssueCertificate action.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Validity { ///

Specifies whether the Value parameter represents days, months, or years.

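For instance, a one-year validity period could be expressed as below; the `type_` field name follows rusoto's convention of renaming the reserved identifier `Type` (as with `CertificateAuthority::type_` earlier in this file) and is an assumption here, as is the `value` field:

```rust
let validity = Validity {
    // DAYS, MONTHS, or YEARS, per the field documentation above.
    type_: "DAYS".to_owned(),
    value: 365,
};
```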
@@ -638,11 +642,11 @@ pub struct Validity { pub enum CreateCertificateAuthorityError { ///

One or more of the specified arguments was not valid.

InvalidArgs(String), - ///

The S3 bucket policy is not valid. The policy must give ACM PCA rights to read from and write to the bucket and find the bucket location.
+ /// The S3 bucket policy is not valid. The policy must give ACM Private CA rights to read from and write to the bucket and find the bucket location.

InvalidPolicy(String), ///

The tag associated with the CA is not valid. The invalid argument is contained in the message field.

InvalidTag(String), - ///

An ACM PCA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.
+ /// An ACM Private CA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.

LimitExceeded(String), } @@ -778,7 +782,7 @@ pub enum CreatePermissionError { InvalidArn(String), ///

The private CA is in a state during which a report or certificate cannot be generated.

InvalidState(String), - ///

An ACM PCA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.
+ /// An ACM Private CA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.

LimitExceeded(String), ///

The designated permission has already been given to the user.

PermissionAlreadyExists(String), @@ -1234,6 +1238,8 @@ pub enum ImportCertificateAuthorityCertificateError { ConcurrentModification(String), ///

The requested Amazon Resource Name (ARN) does not refer to an existing resource.

InvalidArn(String), + ///

The request action cannot be performed or is prohibited.

+ InvalidRequest(String), ///

The private CA is in a state during which a report or certificate cannot be generated.

InvalidState(String), ///

One or more fields in the certificate are invalid.

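The `InvalidRequest` variants added in this file surface as service errors. A sketch of matching on one while importing a CA certificate; `client` (an `AcmPcaClient`) and `request` are assumed to exist, and `.sync()` blocks on the returned future:

```rust
use rusoto_core::RusotoError;

match client.import_certificate_authority_certificate(request).sync() {
    Ok(()) => println!("CA certificate imported"),
    Err(RusotoError::Service(
        ImportCertificateAuthorityCertificateError::InvalidRequest(msg),
    )) => eprintln!("request was rejected: {}", msg),
    Err(other) => eprintln!("import failed: {:?}", other),
}
```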
@@ -1267,6 +1273,11 @@ impl ImportCertificateAuthorityCertificateError { ImportCertificateAuthorityCertificateError::InvalidArn(err.msg), ) } + "InvalidRequestException" => { + return RusotoError::Service( + ImportCertificateAuthorityCertificateError::InvalidRequest(err.msg), + ) + } "InvalidStateException" => { return RusotoError::Service( ImportCertificateAuthorityCertificateError::InvalidState(err.msg), @@ -1310,6 +1321,7 @@ impl Error for ImportCertificateAuthorityCertificateError { ImportCertificateAuthorityCertificateError::CertificateMismatch(ref cause) => cause, ImportCertificateAuthorityCertificateError::ConcurrentModification(ref cause) => cause, ImportCertificateAuthorityCertificateError::InvalidArn(ref cause) => cause, + ImportCertificateAuthorityCertificateError::InvalidRequest(ref cause) => cause, ImportCertificateAuthorityCertificateError::InvalidState(ref cause) => cause, ImportCertificateAuthorityCertificateError::MalformedCertificate(ref cause) => cause, ImportCertificateAuthorityCertificateError::RequestFailed(ref cause) => cause, @@ -1327,7 +1339,7 @@ pub enum IssueCertificateError { InvalidArn(String), ///

The private CA is in a state during which a report or certificate cannot be generated.

InvalidState(String), - ///

An ACM PCA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.
+ /// An ACM Private CA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.

LimitExceeded(String), ///

The certificate signing request is invalid.

MalformedCSR(String), @@ -1480,6 +1492,8 @@ impl Error for ListPermissionsError { pub enum ListTagsError { ///

The requested Amazon Resource Name (ARN) does not refer to an existing resource.

InvalidArn(String), + ///

The private CA is in a state during which a report or certificate cannot be generated.

+ InvalidState(String), ///

A resource such as a private CA, S3 bucket, certificate, or audit report cannot be found.

ResourceNotFound(String), } @@ -1491,6 +1505,9 @@ impl ListTagsError { "InvalidArnException" => { return RusotoError::Service(ListTagsError::InvalidArn(err.msg)) } + "InvalidStateException" => { + return RusotoError::Service(ListTagsError::InvalidState(err.msg)) + } "ResourceNotFoundException" => { return RusotoError::Service(ListTagsError::ResourceNotFound(err.msg)) } @@ -1510,6 +1527,7 @@ impl Error for ListTagsError { fn description(&self) -> &str { match *self { ListTagsError::InvalidArn(ref cause) => cause, + ListTagsError::InvalidState(ref cause) => cause, ListTagsError::ResourceNotFound(ref cause) => cause, } } @@ -1574,9 +1592,11 @@ pub enum RevokeCertificateError { ConcurrentModification(String), ///

The requested Amazon Resource Name (ARN) does not refer to an existing resource.

InvalidArn(String), + ///

The request action cannot be performed or is prohibited.

+ InvalidRequest(String), ///

The private CA is in a state during which a report or certificate cannot be generated.

InvalidState(String), - ///

An ACM PCA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.
+ /// An ACM Private CA limit has been exceeded. See the exception message returned to determine the limit that was exceeded.

LimitExceeded(String), ///

Your request has already been completed.

RequestAlreadyProcessed(String), @@ -1600,6 +1620,9 @@ impl RevokeCertificateError { "InvalidArnException" => { return RusotoError::Service(RevokeCertificateError::InvalidArn(err.msg)) } + "InvalidRequestException" => { + return RusotoError::Service(RevokeCertificateError::InvalidRequest(err.msg)) + } "InvalidStateException" => { return RusotoError::Service(RevokeCertificateError::InvalidState(err.msg)) } @@ -1637,6 +1660,7 @@ impl Error for RevokeCertificateError { match *self { RevokeCertificateError::ConcurrentModification(ref cause) => cause, RevokeCertificateError::InvalidArn(ref cause) => cause, + RevokeCertificateError::InvalidRequest(ref cause) => cause, RevokeCertificateError::InvalidState(ref cause) => cause, RevokeCertificateError::LimitExceeded(ref cause) => cause, RevokeCertificateError::RequestAlreadyProcessed(ref cause) => cause, @@ -1775,7 +1799,7 @@ pub enum UpdateCertificateAuthorityError { InvalidArgs(String), ///

The requested Amazon Resource Name (ARN) does not refer to an existing resource.

InvalidArn(String), - ///

The S3 bucket policy is not valid. The policy must give ACM PCA rights to read from and write to the bucket and find the bucket location.
+ /// The S3 bucket policy is not valid. The policy must give ACM Private CA rights to read from and write to the bucket and find the bucket location.

InvalidPolicy(String), ///

The private CA is in a state during which a report or certificate cannot be generated.

InvalidState(String), @@ -1845,13 +1869,13 @@ impl Error for UpdateCertificateAuthorityError { } /// Trait representing the capabilities of the ACM-PCA API. ACM-PCA clients implement this trait. pub trait AcmPca { - ///

Creates a private subordinate certificate authority (CA). You must specify the CA configuration, the revocation configuration, the CA type, and an optional idempotency token. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses to sign, and X.500 subject information. The CRL (certificate revocation list) configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this operation returns the Amazon Resource Name (ARN) of the CA.
+ /// Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, the certificate revocation list (CRL) configuration, the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA.

fn create_certificate_authority( &self, input: CreateCertificateAuthorityRequest, ) -> RusotoFuture; - ///

Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate operations use the private key. You can generate a new report every 30 minutes.
+ /// Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key.

fn create_certificate_authority_audit_report( &self, input: CreateCertificateAuthorityAuditReportRequest, @@ -1860,31 +1884,31 @@ pub trait AcmPca { CreateCertificateAuthorityAuditReportError, >; - ///

Assigns permissions from a private CA to a designated AWS service. Services are specified by their service principals and can be given permission to create and retrieve certificates on a private CA. Services can also be given permission to list the active permissions that the private CA has granted. For ACM to automatically renew your private CA's certificates, you must assign all possible permissions from the CA to the ACM service principal.

At this time, you can only assign permissions to ACM (acm.amazonaws.com). Permissions can be revoked with the DeletePermission operation and listed with the ListPermissions operation.

+ ///

Assigns permissions from a private CA to a designated AWS service. Services are specified by their service principals and can be given permission to create and retrieve certificates on a private CA. Services can also be given permission to list the active permissions that the private CA has granted. For ACM to automatically renew your private CA's certificates, you must assign all possible permissions from the CA to the ACM service principal.

At this time, you can only assign permissions to ACM (acm.amazonaws.com). Permissions can be revoked with the DeletePermission action and listed with the ListPermissions action.

fn create_permission( &self, input: CreatePermissionRequest, ) -> RusotoFuture<(), CreatePermissionError>; - ///

Deletes a private certificate authority (CA). You must provide the ARN (Amazon Resource Name) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities operation. Before you can delete a CA, you must disable it. Call the UpdateCertificateAuthority operation and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (the Status field of the CertificateAuthority is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate (the Status is PENDING_CERTIFICATE) into ACM PCA.

If the CA is in one of the previously mentioned states and you call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority operation returns the time remaining in the restoration window of a Private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority operation.

+ ///

Deletes a private certificate authority (CA). You must provide the Amazon Resource Name (ARN) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities action.

Deleting a CA will invalidate other CAs and certificates below it in your CA hierarchy.

Before you can delete a CA that you have created and activated, you must disable it. To do this, call the UpdateCertificateAuthority action and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (that is, the status of the CA is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate into ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE).

When you successfully call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority action returns the time remaining in the restoration window of a private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority action.

fn delete_certificate_authority( &self, input: DeleteCertificateAuthorityRequest, ) -> RusotoFuture<(), DeleteCertificateAuthorityError>; - ///

Revokes permissions that a private CA assigned to a designated AWS service. Permissions can be created with the CreatePermission operation and listed with the ListPermissions operation.
+ /// Revokes permissions that a private CA assigned to a designated AWS service. Permissions can be created with the CreatePermission action and listed with the ListPermissions action.

fn delete_permission( &self, input: DeletePermissionRequest, ) -> RusotoFuture<(), DeletePermissionError>; - ///

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

  • CREATING - ACM PCA is creating your private certificate authority.

  • PENDING_CERTIFICATE - The certificate is pending. You must use your on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA.

  • ACTIVE - Your private CA is active.

  • DISABLED - Your private CA has been disabled.

  • EXPIRED - Your private CA certificate has expired.

  • FAILED - Your private CA has failed. Your CA can fail because of problems such a network outage or backend AWS failure or other errors. A failed CA can never return to the pending state. You must create a new CA.

  • DELETED - Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this operation's output.

+ ///

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

  • CREATING - ACM Private CA is creating your private certificate authority.

  • PENDING_CERTIFICATE - The certificate is pending. You must use your ACM Private CA-hosted or on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA.

  • ACTIVE - Your private CA is active.

  • DISABLED - Your private CA has been disabled.

  • EXPIRED - Your private CA certificate has expired.

  • FAILED - Your private CA has failed. Your CA can fail because of problems such as a network outage or backend AWS failure or other errors. A failed CA can never return to the pending state. You must create a new CA.

  • DELETED - Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this action's output.

fn describe_certificate_authority( &self, input: DescribeCertificateAuthorityRequest, ) -> RusotoFuture; - ///

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport operation. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate operation or the RevokeCertificate operation.
+ /// Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate action or the RevokeCertificate action.

fn describe_certificate_authority_audit_report( &self, input: DescribeCertificateAuthorityAuditReportRequest, @@ -1893,7 +1917,7 @@ pub trait AcmPca { DescribeCertificateAuthorityAuditReportError, >; - ///

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate operation. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate operation. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport operation to create a report that contains information about all of the certificates issued and revoked by your private CA.

+ ///

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate action. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate action. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport action to create a report that contains information about all of the certificates issued and revoked by your private CA.
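A short sketch of the retrieval flow this paragraph describes, using placeholder ARNs (the certificate ARN would come from an earlier `IssueCertificate` call):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, GetCertificateRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = GetCertificateRequest {
        // Placeholder ARNs.
        certificate_arn: "arn:aws:acm-pca:...:certificate/EXAMPLE".to_string(),
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
    };
    if let Ok(resp) = client.get_certificate(request).sync() {
        // `certificate` is the PEM body; `certificate_chain` completes the chain of trust.
        println!("{}", resp.certificate.unwrap_or_default());
    }
}
```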

fn get_certificate( &self, input: GetCertificateRequest, ) -> RusotoFuture<GetCertificateResponse, GetCertificateError>; @@ -1908,58 +1932,58 @@ pub trait AcmPca { fn get_certificate_authority_certificate( &self, input: GetCertificateAuthorityCertificateRequest, ) -> RusotoFuture< GetCertificateAuthorityCertificateResponse, GetCertificateAuthorityCertificateError, >; - ///

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority operation. Take the CSR to your on-premises X.509 infrastructure and sign it by using your root or a subordinate CA. Then import the signed certificate back into ACM PCA by calling the ImportCertificateAuthorityCertificate operation. The CSR is returned as a base64 PEM-encoded string.

+ ///

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA. Then import the signed certificate back into ACM Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a base64 PEM-encoded string.
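A minimal sketch of fetching the CSR (placeholder ARN; the response field name follows the generated shape):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, GetCertificateAuthorityCsrRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = GetCertificateAuthorityCsrRequest {
        // Placeholder ARN.
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
    };
    // The CSR comes back as a base64 PEM-encoded string, ready to hand to the signing CA.
    if let Ok(resp) = client.get_certificate_authority_csr(request).sync() {
        println!("{}", resp.csr.unwrap_or_default());
    }
}
```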

fn get_certificate_authority_csr( &self, input: GetCertificateAuthorityCsrRequest, ) -> RusotoFuture<GetCertificateAuthorityCsrResponse, GetCertificateAuthorityCsrError>; - ///

Imports your signed private CA certificate into ACM PCA. Before you can call this operation, you must create the private certificate authority by calling the CreateCertificateAuthority operation. You must then generate a certificate signing request (CSR) by calling the GetCertificateAuthorityCsr operation. Take the CSR to your on-premises CA and use the root certificate or a subordinate certificate to sign it. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Your certificate chain must not include the private CA certificate that you are importing.

Your on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

The chain must be PEM-encoded.

+ ///

Imports a signed private CA certificate into ACM Private CA. This action is used when you are using a chain of trust whose root is located outside ACM Private CA. Before you can call this action, the following preparations must be in place:

  1. In ACM Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate.

  2. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR).

  3. Sign the CSR using a root or intermediate CA hosted either by an on-premises PKI hierarchy or a commercial CA.

  4. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

The following requirements apply when you import a CA certificate.

  • You cannot import a non-self-signed certificate for use as a root CA.

  • You cannot import a self-signed certificate for use as a subordinate CA.

  • Your certificate chain must not include the private CA certificate that you are importing.

  • Your ACM Private CA-hosted or on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

  • The chain must be PEM-encoded.
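As shown in the list above, the import takes the signed CA certificate plus an optional chain. A minimal sketch under assumed file names (the blob fields accept anything convertible into the generated byte type, so `fs::read(...)?.into()` works):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, ImportCertificateAuthorityCertificateRequest};
use rusoto_core::Region;
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = ImportCertificateAuthorityCertificateRequest {
        // Placeholder ARN and file names.
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        certificate: fs::read("signed_ca_cert.pem")?.into(), // the signed CA certificate
        certificate_chain: Some(fs::read("chain.pem")?.into()), // PEM chain, CA cert excluded
    };
    // Returns () on success.
    client.import_certificate_authority_certificate(request).sync()?;
    Ok(())
}
```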

fn import_certificate_authority_certificate( &self, input: ImportCertificateAuthorityCertificateRequest, ) -> RusotoFuture<(), ImportCertificateAuthorityCertificateError>; - ///

Uses your private certificate authority (CA) to issue a client certificate. This operation returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate operation and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities operation to retrieve the ARNs of the certificates that you issue by using ACM PCA.

+ ///

Uses your private certificate authority (CA) to issue a client certificate. This action returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate action and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities action to retrieve the ARNs of the certificates that you issue by using ACM Private CA.
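A sketch of issuing a certificate from the CA. Field names follow the generated shapes (the reserved word `Type` becomes `type_` in Rust); the ARN and CSR file are placeholders:

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, IssueCertificateRequest, Validity};
use rusoto_core::Region;
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = IssueCertificateRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        csr: fs::read("client.csr")?.into(),
        signing_algorithm: "SHA256WITHRSA".to_string(),
        // One-year validity, expressed in days.
        validity: Validity { type_: "DAYS".to_string(), value: 365 },
        ..Default::default()
    };
    let resp = client.issue_certificate(request).sync()?;
    // The ARN is what you later feed to GetCertificate.
    println!("issued: {:?}", resp.certificate_arn);
    Ok(())
}
```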

fn issue_certificate( &self, input: IssueCertificateRequest, ) -> RusotoFuture<IssueCertificateResponse, IssueCertificateError>; - ///

Lists the private certificate authorities that you created by using the CreateCertificateAuthority operation.

+ ///

Lists the private certificate authorities that you created by using the CreateCertificateAuthority action.
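A paging sketch for this list operation (all request fields are optional, so `Default::default()` fills the rest):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, ListCertificateAuthoritiesRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = ListCertificateAuthoritiesRequest {
        max_results: Some(10),
        ..Default::default()
    };
    if let Ok(resp) = client.list_certificate_authorities(request).sync() {
        for ca in resp.certificate_authorities.unwrap_or_default() {
            println!("{:?} {:?}", ca.arn, ca.status);
        }
        // `resp.next_token` feeds the next page, if any.
    }
}
```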

fn list_certificate_authorities( &self, input: ListCertificateAuthoritiesRequest, ) -> RusotoFuture<ListCertificateAuthoritiesResponse, ListCertificateAuthoritiesError>; - ///

Lists all the permissions, if any, that have been assigned by a private CA. Permissions can be granted with the CreatePermission operation and revoked with the DeletePermission operation.

+ ///

Lists all the permissions, if any, that have been assigned by a private CA. Permissions can be granted with the CreatePermission action and revoked with the DeletePermission action.
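A compact sketch of listing a CA's permissions (placeholder ARN):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, ListPermissionsRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = ListPermissionsRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        ..Default::default()
    };
    if let Ok(resp) = client.list_permissions(request).sync() {
        // Each Permission names the principal and the actions it was granted.
        println!("{:?}", resp.permissions);
    }
}
```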

fn list_permissions( &self, input: ListPermissionsRequest, ) -> RusotoFuture<ListPermissionsResponse, ListPermissionsError>; - ///

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority operation to add one or more tags to your CA. Call the UntagCertificateAuthority operation to remove tags.

+ ///

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority action to add one or more tags to your CA. Call the UntagCertificateAuthority action to remove tags.
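The same call pattern applies to tags (placeholder ARN):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, ListTagsRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = ListTagsRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        ..Default::default()
    };
    if let Ok(resp) = client.list_tags(request).sync() {
        // Tags come back as key/optional-value pairs.
        println!("{:?}", resp.tags);
    }
}
```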

fn list_tags(&self, input: ListTagsRequest) -> RusotoFuture<ListTagsResponse, ListTagsError>; - ///

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority operation. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities operations. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority operation returns. To change its status to ACTIVE, call the UpdateCertificateAuthority operation. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate operation to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.

+ ///

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority action. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities actions. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority action returns. To change its status to ACTIVE, call the UpdateCertificateAuthority action. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate action to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.
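Restoration takes nothing but the ARN; a minimal sketch:

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, RestoreCertificateAuthorityRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = RestoreCertificateAuthorityRequest {
        // Placeholder ARN of a CA still inside its restoration window.
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
    };
    // Returns () on success; the CA regains its pre-deletion status.
    if let Err(e) = client.restore_certificate_authority(request).sync() {
        eprintln!("restore failed: {}", e);
    }
}
```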

fn restore_certificate_authority( &self, input: RestoreCertificateAuthorityRequest, ) -> RusotoFuture<(), RestoreCertificateAuthorityError>; - ///

Revokes a certificate that you issued by calling the IssueCertificate operation. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM PCA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM PCA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

+ ///

Revokes a certificate that was issued inside ACM Private CA. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM Private CA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM Private CA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

You cannot revoke a root CA self-signed certificate.
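A revocation sketch; the serial and ARN are placeholders, and the reason must be one of the documented revocation-reason strings:

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, RevokeCertificateRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = RevokeCertificateRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        // Placeholder serial of the certificate to revoke.
        certificate_serial: "67:07:44:76:83:a9:b7:f4:05:56:27:ff:d5:5c:eb:cc".to_string(),
        revocation_reason: "KEY_COMPROMISE".to_string(),
    };
    if let Err(e) = client.revoke_certificate(request).sync() {
        eprintln!("revoke failed: {}", e);
    }
}
```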

fn revoke_certificate( &self, input: RevokeCertificateRequest, ) -> RusotoFuture<(), RevokeCertificateError>; - ///

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority operation. Call the ListTags operation to see what tags are associated with your CA.

+ ///

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA.
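A tagging sketch using the generated `Tag` pair (placeholder ARN and tag values):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, Tag, TagCertificateAuthorityRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = TagCertificateAuthorityRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        tags: vec![Tag {
            key: "Environment".to_string(),
            value: Some("staging".to_string()), // the value half of the pair is optional
        }],
    };
    let _ = client.tag_certificate_authority(request).sync();
}
```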

fn tag_certificate_authority( &self, input: TagCertificateAuthorityRequest, ) -> RusotoFuture<(), TagCertificateAuthorityError>; - ///

Removes one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this operation, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority operation. Call the ListTags operation to see what tags are associated with your CA.

+ ///

Removes one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this action, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA.
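A companion to the tagging sketch above: the same `Tag` list drives removal, and a `None` value removes the tag regardless of its value:

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, Tag, UntagCertificateAuthorityRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = UntagCertificateAuthorityRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        // No value: remove the "Environment" tag whatever its value is.
        tags: vec![Tag { key: "Environment".to_string(), value: None }],
    };
    let _ = client.untag_certificate_authority(request).sync();
}
```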

fn untag_certificate_authority( &self, input: UntagCertificateAuthorityRequest, @@ -1983,10 +2007,7 @@ impl AcmPcaClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> AcmPcaClient { - AcmPcaClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2000,15 +2021,19 @@ impl AcmPcaClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - AcmPcaClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> AcmPcaClient { + AcmPcaClient { client, region } } } impl AcmPca for AcmPcaClient { - ///

Creates a private subordinate certificate authority (CA). You must specify the CA configuration, the revocation configuration, the CA type, and an optional idempotency token. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses to sign, and X.500 subject information. The CRL (certificate revocation list) configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this operation returns the Amazon Resource Name (ARN) of the CA.

+ ///

Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, the certificate revocation list (CRL) configuration, the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA.
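A creation sketch under stated assumptions: the subject struct is assumed to be named `ASN1Subject` as in the generated shapes, and all names and tokens here are placeholders:

```rust
use rusoto_acm_pca::{
    ASN1Subject, AcmPca, AcmPcaClient, CertificateAuthorityConfiguration,
    CreateCertificateAuthorityRequest,
};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = CreateCertificateAuthorityRequest {
        certificate_authority_configuration: CertificateAuthorityConfiguration {
            key_algorithm: "RSA_2048".to_string(),
            signing_algorithm: "SHA256WITHRSA".to_string(),
            subject: ASN1Subject {
                common_name: Some("example-root-ca".to_string()),
                ..Default::default()
            },
        },
        certificate_authority_type: "ROOT".to_string(), // or "SUBORDINATE"
        // Idempotency token guards against accidental duplicate CAs.
        idempotency_token: Some("create-ca-1".to_string()),
        ..Default::default()
    };
    if let Ok(resp) = client.create_certificate_authority(request).sync() {
        println!("{:?}", resp.certificate_authority_arn);
    }
}
```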

fn create_certificate_authority( &self, input: CreateCertificateAuthorityRequest, @@ -2034,7 +2059,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate operations use the private key. You can generate a new report every 30 minutes.

+ ///

Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key.
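A sketch of requesting an audit report (bucket name and ARN are placeholders; the bucket must grant the service write access):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, CreateCertificateAuthorityAuditReportRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = CreateCertificateAuthorityAuditReportRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        s3_bucket_name: "my-audit-bucket".to_string(),
        audit_report_response_format: "JSON".to_string(), // or "CSV"
    };
    if let Ok(resp) = client.create_certificate_authority_audit_report(request).sync() {
        // The report lands in S3 at the returned key.
        println!("{:?} {:?}", resp.audit_report_id, resp.s3_key);
    }
}
```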

fn create_certificate_authority_audit_report( &self, input: CreateCertificateAuthorityAuditReportRequest, @@ -2068,7 +2093,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Assigns permissions from a private CA to a designated AWS service. Services are specified by their service principals and can be given permission to create and retrieve certificates on a private CA. Services can also be given permission to list the active permissions that the private CA has granted. For ACM to automatically renew your private CA's certificates, you must assign all possible permissions from the CA to the ACM service principal.

At this time, you can only assign permissions to ACM (acm.amazonaws.com). Permissions can be revoked with the DeletePermission operation and listed with the ListPermissions operation.

+ ///

Assigns permissions from a private CA to a designated AWS service. Services are specified by their service principals and can be given permission to create and retrieve certificates on a private CA. Services can also be given permission to list the active permissions that the private CA has granted. For ACM to automatically renew your private CA's certificates, you must assign all possible permissions from the CA to the ACM service principal.

At this time, you can only assign permissions to ACM (acm.amazonaws.com). Permissions can be revoked with the DeletePermission action and listed with the ListPermissions action.
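A sketch granting ACM all three assignable actions, as the paragraph above recommends for automatic renewal (placeholder ARN):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, CreatePermissionRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = CreatePermissionRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        principal: "acm.amazonaws.com".to_string(),
        // All grantable actions, so ACM can renew certificates from this CA.
        actions: vec![
            "IssueCertificate".to_string(),
            "GetCertificate".to_string(),
            "ListPermissions".to_string(),
        ],
        ..Default::default()
    };
    let _ = client.create_permission(request).sync();
}
```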

fn create_permission( &self, input: CreatePermissionRequest, @@ -2094,7 +2119,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Deletes a private certificate authority (CA). You must provide the ARN (Amazon Resource Name) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities operation. Before you can delete a CA, you must disable it. Call the UpdateCertificateAuthority operation and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (the Status field of the CertificateAuthority is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate (the Status is PENDING_CERTIFICATE) into ACM PCA.

If the CA is in one of the previously mentioned states and you call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority operation returns the time remaining in the restoration window of a Private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority operation.

+ ///

Deletes a private certificate authority (CA). You must provide the Amazon Resource Name (ARN) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities action.

Deleting a CA will invalidate other CAs and certificates below it in your CA hierarchy.

Before you can delete a CA that you have created and activated, you must disable it. To do this, call the UpdateCertificateAuthority action and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (that is, the status of the CA is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate into ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE).

When you successfully call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority action returns the time remaining in the restoration window of a private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority action.
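A deletion sketch with an explicit restoration window (placeholder ARN):

```rust
use rusoto_acm_pca::{AcmPca, AcmPcaClient, DeleteCertificateAuthorityRequest};
use rusoto_core::Region;

fn main() {
    let client = AcmPcaClient::new(Region::UsEast1);
    let request = DeleteCertificateAuthorityRequest {
        certificate_authority_arn: "arn:aws:acm-pca:...:certificate-authority/EXAMPLE".to_string(),
        // Shortest allowed restoration window; omit for the 30-day default.
        permanent_deletion_time_in_days: Some(7),
    };
    if let Err(e) = client.delete_certificate_authority(request).sync() {
        eprintln!("delete failed: {}", e);
    }
}
```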

fn delete_certificate_authority( &self, input: DeleteCertificateAuthorityRequest, @@ -2117,7 +2142,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Revokes permissions that a private CA assigned to a designated AWS service. Permissions can be created with the CreatePermission operation and listed with the ListPermissions operation.

+ ///

Revokes permissions that a private CA assigned to a designated AWS service. Permissions can be created with the CreatePermission action and listed with the ListPermissions action.

fn delete_permission( &self, input: DeletePermissionRequest, @@ -2143,7 +2168,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

  • CREATING - ACM PCA is creating your private certificate authority.

  • PENDING_CERTIFICATE - The certificate is pending. You must use your on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA.

  • ACTIVE - Your private CA is active.

  • DISABLED - Your private CA has been disabled.

  • EXPIRED - Your private CA certificate has expired.

  • FAILED - Your private CA has failed. Your CA can fail because of problems such as a network outage, a backend AWS failure, or other errors. A failed CA can never return to the pending state. You must create a new CA.

  • DELETED - Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this operation's output.

+ ///

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

  • CREATING - ACM Private CA is creating your private certificate authority.

  • PENDING_CERTIFICATE - The certificate is pending. You must use your ACM Private CA-hosted or on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA.

  • ACTIVE - Your private CA is active.

  • DISABLED - Your private CA has been disabled.

  • EXPIRED - Your private CA certificate has expired.

  • FAILED - Your private CA has failed. Your CA can fail because of problems such as a network outage, a backend AWS failure, or other errors. A failed CA can never return to the pending state. You must create a new CA.

  • DELETED - Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this action's output.

fn describe_certificate_authority( &self, input: DescribeCertificateAuthorityRequest, @@ -2169,7 +2194,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport operation. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate operation or the RevokeCertificate operation.

+ ///

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate action or the RevokeCertificate action.

fn describe_certificate_authority_audit_report( &self, input: DescribeCertificateAuthorityAuditReportRequest, @@ -2203,7 +2228,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate operation. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate operation. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport operation to create a report that contains information about all of the certificates issued and revoked by your private CA.

+ ///

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate action. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate action. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport action to create a report that contains information about all of the certificates issued and revoked by your private CA.

fn get_certificate( &self, input: GetCertificateRequest, @@ -2266,7 +2291,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority operation. Take the CSR to your on-premises X.509 infrastructure and sign it by using your root or a subordinate CA. Then import the signed certificate back into ACM PCA by calling the ImportCertificateAuthorityCertificate operation. The CSR is returned as a base64 PEM-encoded string.

+ ///

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA. Then import the signed certificate back into ACM Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a base64 PEM-encoded string.

fn get_certificate_authority_csr( &self, input: GetCertificateAuthorityCsrRequest, @@ -2292,7 +2317,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Imports your signed private CA certificate into ACM PCA. Before you can call this operation, you must create the private certificate authority by calling the CreateCertificateAuthority operation. You must then generate a certificate signing request (CSR) by calling the GetCertificateAuthorityCsr operation. Take the CSR to your on-premises CA and use the root certificate or a subordinate certificate to sign it. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Your certificate chain must not include the private CA certificate that you are importing.

Your on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

The chain must be PEM-encoded.

+ ///

Imports a signed private CA certificate into ACM Private CA. This action is used when you are using a chain of trust whose root is located outside ACM Private CA. Before you can call this action, the following preparations must be in place:

  1. In ACM Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate.

  2. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR).

  3. Sign the CSR using a root or intermediate CA hosted either by an on-premises PKI hierarchy or a commercial CA.

  4. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

The following requirements apply when you import a CA certificate.

  • You cannot import a non-self-signed certificate for use as a root CA.

  • You cannot import a self-signed certificate for use as a subordinate CA.

  • Your certificate chain must not include the private CA certificate that you are importing.

  • Your ACM Private CA-hosted or on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

  • The chain must be PEM-encoded.

fn import_certificate_authority_certificate( &self, input: ImportCertificateAuthorityCertificateRequest, @@ -2320,7 +2345,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Uses your private certificate authority (CA) to issue a client certificate. This operation returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate operation and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities operation to retrieve the ARNs of the certificates that you issue by using ACM PCA.

+ ///

Uses your private certificate authority (CA) to issue a client certificate. This action returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate action and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities action to retrieve the ARNs of the certificates that you issue by using ACM Private CA.

fn issue_certificate( &self, input: IssueCertificateRequest, @@ -2349,7 +2374,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Lists the private certificate authorities that you created by using the CreateCertificateAuthority operation.

+ ///

Lists the private certificate authorities that you created by using the CreateCertificateAuthority action.

fn list_certificate_authorities( &self, input: ListCertificateAuthoritiesRequest, @@ -2375,7 +2400,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Lists all the permissions, if any, that have been assigned by a private CA. Permissions can be granted with the CreatePermission operation and revoked with the DeletePermission operation.

+ ///

Lists all the permissions, if any, that have been assigned by a private CA. Permissions can be granted with the CreatePermission action and revoked with the DeletePermission action.

fn list_permissions( &self, input: ListPermissionsRequest, @@ -2404,7 +2429,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority operation to add one or more tags to your CA. Call the UntagCertificateAuthority operation to remove tags.

+ ///

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority action to add one or more tags to your CA. Call the UntagCertificateAuthority action to remove tags.

fn list_tags(&self, input: ListTagsRequest) -> RusotoFuture<ListTagsResponse, ListTagsError> { let mut request = SignedRequest::new("POST", "acm-pca", &self.region, "/");

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority operation. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities operations. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority operation returns. To change its status to ACTIVE, call the UpdateCertificateAuthority operation. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate operation to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.

+ ///

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority action. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities actions. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority action returns. To change its status to ACTIVE, call the UpdateCertificateAuthority action. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate action to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.

fn restore_certificate_authority( &self, input: RestoreCertificateAuthorityRequest, @@ -2453,7 +2478,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Revokes a certificate that you issued by calling the IssueCertificate operation. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM PCA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM PCA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

+ ///

Revokes a certificate that was issued inside ACM Private CA. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM Private CA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM Private CA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

You cannot revoke a root CA self-signed certificate.

fn revoke_certificate( &self, input: RevokeCertificateRequest, @@ -2479,7 +2504,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority operation. Call the ListTags operation to see what tags are associated with your CA.

+ ///

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA.

fn tag_certificate_authority( &self, input: TagCertificateAuthorityRequest, @@ -2502,7 +2527,7 @@ impl AcmPca for AcmPcaClient { }) } - ///

Removes one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this operation, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority operation. Call the ListTags operation to see what tags are associated with your CA.

+ ///

Removes one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this action, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA.

fn untag_certificate_authority( &self, input: UntagCertificateAuthorityRequest, diff --git a/rusoto/services/acm-pca/src/lib.rs b/rusoto/services/acm-pca/src/lib.rs index c12ee71f6f9..6dea8e76ce6 100644 --- a/rusoto/services/acm-pca/src/lib.rs +++ b/rusoto/services/acm-pca/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

You can use the ACM PCA API to create a private certificate authority (CA). You must first call the CreateCertificateAuthority operation. If successful, the operation returns an Amazon Resource Name (ARN) for your private CA. Use this ARN as input to the GetCertificateAuthorityCsr operation to retrieve the certificate signing request (CSR) for your private CA certificate. Sign the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, and call the ImportCertificateAuthorityCertificate operation to import your signed private CA certificate into ACM PCA.

Use your private CA to issue and revoke certificates. These are private certificates that identify and secure client computers, servers, applications, services, devices, and users over SSL/TLS connections within your organization. Call the IssueCertificate operation to issue a certificate. Call the RevokeCertificate operation to revoke a certificate.

Certificates issued by your private CA can be trusted only within your organization, not publicly.

Your private CA can optionally create a certificate revocation list (CRL) to track the certificates you revoke. To create a CRL, you must specify a RevocationConfiguration object when you call the CreateCertificateAuthority operation. ACM PCA writes the CRL to an S3 bucket that you specify. You must specify a bucket policy that grants ACM PCA write permission.

You can also call the CreateCertificateAuthorityAuditReport operation to create an optional audit report, which enumerates all of the issued, valid, expired, and revoked certificates from the CA.

Each ACM PCA API operation has a throttling limit which determines the number of times the operation can be called per second. For more information, see API Rate Limits in ACM PCA in the ACM PCA user guide.

+//!

This is the ACM Private CA API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing private certificate authorities (CA) for your organization.

The documentation for each action shows the Query API request parameters and the XML response. Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

Each ACM Private CA API action has a throttling limit which determines the number of times the action can be called per second. For more information, see API Rate Limits in ACM Private CA in the ACM Private CA user guide.
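Alongside the crate docs above, a minimal sketch of constructing the client, including the `new_with_client` constructor this diff introduces:

```rust
use rusoto_acm_pca::AcmPcaClient;
use rusoto_core::{Client, Region};

fn main() {
    // Default credentials provider and TLS client:
    let _client = AcmPcaClient::new(Region::UsEast1);

    // Or reuse an existing rusoto_core::Client across service clients,
    // via the new_with_client constructor added in this diff:
    let shared = Client::shared();
    let _client2 = AcmPcaClient::new_with_client(shared, Region::UsEast1);
}
```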

//! //! If you're using the service, you're probably looking for [AcmPcaClient](struct.AcmPcaClient.html) and [AcmPca](trait.AcmPca.html). diff --git a/rusoto/services/acm/Cargo.toml b/rusoto/services/acm/Cargo.toml index 5419285165c..2871d794e46 100644 --- a/rusoto/services/acm/Cargo.toml +++ b/rusoto/services/acm/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_acm" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/acm/README.md b/rusoto/services/acm/README.md index 5371561a89c..39a8f428d85 100644 --- a/rusoto/services/acm/README.md +++ b/rusoto/services/acm/README.md @@ -23,9 +23,16 @@ To use `rusoto_acm` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_acm = "0.40.0" +rusoto_acm = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/acm/src/custom/mod.rs b/rusoto/services/acm/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/acm/src/custom/mod.rs +++ b/rusoto/services/acm/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/acm/src/generated.rs b/rusoto/services/acm/src/generated.rs index 0d570890b47..c6fa1f736d1 100644 --- a/rusoto/services/acm/src/generated.rs +++ b/rusoto/services/acm/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -36,7 +35,7 @@ pub struct AddTagsToCertificateRequest { ///

Contains metadata about an ACM certificate. This structure is returned in the response to a DescribeCertificate request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CertificateDetail { ///

The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

#[serde(rename = "CertificateArn")] @@ -155,7 +154,7 @@ pub struct CertificateOptions { ///

This structure is returned in the response object of ListCertificates action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CertificateSummary { ///

Amazon Resource Name (ARN) of the certificate. This is of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

#[serde(rename = "CertificateArn")] @@ -182,7 +181,7 @@ pub struct DescribeCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCertificateResponse { ///

Metadata about an ACM certificate.

#[serde(rename = "Certificate")] @@ -192,7 +191,7 @@ pub struct DescribeCertificateResponse { ///

Contains information about the validation of each domain name in the certificate.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainValidation { ///

A fully qualified domain name (FQDN) in the certificate. For example, www.example.com or example.com.

#[serde(rename = "DomainName")] @@ -246,7 +245,7 @@ pub struct ExportCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportCertificateResponse { ///

The base64 PEM-encoded certificate.

#[serde(rename = "Certificate")] @@ -264,7 +263,7 @@ pub struct ExportCertificateResponse { ///

The Extended Key Usage X.509 v3 extension defines one or more purposes for which the public key can be used. This is in addition to or in place of the basic purposes specified by the Key Usage extension.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExtendedKeyUsage { ///

The name of an Extended Key Usage value.

#[serde(rename = "Name")] @@ -301,7 +300,7 @@ pub struct GetCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCertificateResponse { ///

String that contains the ACM certificate represented by the ARN specified at input.

#[serde(rename = "Certificate")] @@ -347,7 +346,7 @@ pub struct ImportCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportCertificateResponse { ///

The Amazon Resource Name (ARN) of the imported certificate.

#[serde(rename = "CertificateArn")] @@ -357,7 +356,7 @@ pub struct ImportCertificateResponse { ///

The Key Usage X.509 v3 extension defines the purpose of the public key contained in the certificate.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyUsage { ///

A string value that contains a Key Usage extension name.

#[serde(rename = "Name")] @@ -386,7 +385,7 @@ pub struct ListCertificatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCertificatesResponse { ///

A list of ACM certificates.

#[serde(rename = "CertificateSummaryList")] @@ -406,7 +405,7 @@ pub struct ListTagsForCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForCertificateResponse { ///

The key-value pairs that define the applied tags.

#[serde(rename = "Tags")] @@ -433,7 +432,7 @@ pub struct RenewCertificateRequest { ///

Contains information about the status of ACM's managed renewal for the certificate. This structure exists only when the certificate type is AMAZON_ISSUED.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RenewalSummary { ///

Contains information about the validation of each domain name in the certificate, as it pertains to ACM's managed renewal. This is different from the initial validation that occurs as a result of the RequestCertificate request. This field exists only when the certificate type is AMAZON_ISSUED.

#[serde(rename = "DomainValidationOptions")] @@ -482,7 +481,7 @@ pub struct RequestCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RequestCertificateResponse { ///

String that contains the ARN of the issued certificate. This must be of the form:

arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012

#[serde(rename = "CertificateArn")] @@ -505,7 +504,7 @@ pub struct ResendValidationEmailRequest { ///

Contains a DNS record value that you can use to validate ownership or control of a domain. This is used by the DescribeCertificate action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceRecord { ///

The name of the DNS record to create in your domain. This is supplied by ACM.

#[serde(rename = "Name")] @@ -1223,10 +1222,7 @@ impl AcmClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> AcmClient { - AcmClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1240,10 +1236,14 @@ impl AcmClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - AcmClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> AcmClient { + AcmClient { client, region } } } diff --git a/rusoto/services/alexaforbusiness/Cargo.toml b/rusoto/services/alexaforbusiness/Cargo.toml index cd051e98060..a7723b09ea6 100644 --- a/rusoto/services/alexaforbusiness/Cargo.toml +++ b/rusoto/services/alexaforbusiness/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_alexaforbusiness" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/alexaforbusiness/README.md b/rusoto/services/alexaforbusiness/README.md index c73c315eaae..880054ef944 100644 --- a/rusoto/services/alexaforbusiness/README.md +++ b/rusoto/services/alexaforbusiness/README.md @@ -23,9 +23,16 @@ To use `rusoto_alexaforbusiness` in your application, add it as a dependency in ```toml [dependencies] -rusoto_alexaforbusiness = "0.40.0" +rusoto_alexaforbusiness = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/alexaforbusiness/src/custom/mod.rs b/rusoto/services/alexaforbusiness/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/alexaforbusiness/src/custom/mod.rs +++ b/rusoto/services/alexaforbusiness/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/alexaforbusiness/src/generated.rs b/rusoto/services/alexaforbusiness/src/generated.rs index f47465690ae..7622964876c 100644 --- a/rusoto/services/alexaforbusiness/src/generated.rs +++ b/rusoto/services/alexaforbusiness/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

An address book with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddressBook { ///

The ARN of the address book.

#[serde(rename = "AddressBookArn")] @@ -44,7 +43,7 @@ pub struct AddressBook { ///

Information related to an address book.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddressBookData { ///

The ARN of the address book.

#[serde(rename = "AddressBookArn")] @@ -68,7 +67,7 @@ pub struct ApproveSkillRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApproveSkillResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -82,7 +81,7 @@ pub struct AssociateContactWithAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateContactWithAddressBookResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -96,7 +95,7 @@ pub struct AssociateDeviceWithNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDeviceWithNetworkProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -112,7 +111,7 @@ pub struct AssociateDeviceWithRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDeviceWithRoomResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -128,7 +127,7 @@ pub struct AssociateSkillGroupWithRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateSkillGroupWithRoomResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -143,7 +142,7 @@ pub struct AssociateSkillWithSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateSkillWithSkillGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -154,7 +153,7 @@ pub struct AssociateSkillWithUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateSkillWithUsersResponse {} ///

The audio message. There is a 1 MB limit on the audio file input, and the only supported format is MP3. To convert your MP3 audio files to an Alexa-friendly format with the required codec version (MPEG version 2) and bit rate (48 kbps), you might use converter software. One option is the command-line tool FFmpeg. For more information, see FFmpeg. The following command converts the provided <input-file> to an MP3 file that is played in the announcement:

ffmpeg -i <input-file> -ac 2 -codec:a libmp3lame -b:a 48k -ar 16000 <output-file.mp3>

@@ -170,7 +169,7 @@ pub struct Audio { ///

Usage report with specified parameters.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BusinessReport { ///

The time of report delivery.

#[serde(rename = "DeliveryTime")] @@ -214,7 +213,7 @@ pub struct BusinessReportRecurrence { ///

The S3 location of the output reports.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BusinessReportS3Location { ///

The S3 bucket name of the output reports.

#[serde(rename = "BucketName")] @@ -228,7 +227,7 @@ pub struct BusinessReportS3Location { ///

The schedule of the usage report.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BusinessReportSchedule { ///

The content range of the reports.

#[serde(rename = "ContentRange")] @@ -266,7 +265,7 @@ pub struct BusinessReportSchedule { ///

The skill store category that is shown. Alexa skills are assigned a specific skill category during creation, such as News, Social, and Sports.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Category { ///

The ID of the skill store category.

#[serde(rename = "CategoryId")] @@ -289,7 +288,7 @@ pub struct ConferencePreference { ///

An entity that provides a conferencing solution. Alexa for Business acts as the voice interface and mediator that connects users to their preferred conference provider. Examples of conference providers include Amazon Chime, Zoom, Cisco, and Polycom.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConferenceProvider { ///

The ARN of the newly created conference provider.

#[serde(rename = "Arn")] @@ -319,7 +318,7 @@ pub struct ConferenceProvider { ///

A contact with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Contact { ///

The ARN of the contact.

#[serde(rename = "ContactArn")] @@ -337,15 +336,23 @@ pub struct Contact { #[serde(rename = "LastName")] #[serde(skip_serializing_if = "Option::is_none")] pub last_name: Option, - ///

The phone number of the contact.

+ ///

The phone number of the contact. The phone number type defaults to WORK. You can either specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

#[serde(rename = "PhoneNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub phone_number: Option, + ///

The list of phone numbers for the contact.

+ #[serde(rename = "PhoneNumbers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub phone_numbers: Option>, + ///

The list of SIP addresses for the contact.

+ #[serde(rename = "SipAddresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sip_addresses: Option>, } ///

Information related to a contact.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContactData { ///

The ARN of the contact.

#[serde(rename = "ContactArn")] @@ -363,10 +370,18 @@ pub struct ContactData { #[serde(rename = "LastName")] #[serde(skip_serializing_if = "Option::is_none")] pub last_name: Option, - ///

The phone number of the contact.

+ ///

The phone number of the contact. The phone number type defaults to WORK. You can specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

#[serde(rename = "PhoneNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub phone_number: Option, + ///

The list of phone numbers for the contact.

+ #[serde(rename = "PhoneNumbers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub phone_numbers: Option>, + ///

The list of SIP addresses for the contact.

+ #[serde(rename = "SipAddresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sip_addresses: Option>, } ///

The content definition. This can contain only one text, SSML, or audio list object.

@@ -402,7 +417,7 @@ pub struct CreateAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAddressBookResponse { ///

The ARN of the newly created address book.

#[serde(rename = "AddressBookArn")] @@ -441,7 +456,7 @@ pub struct CreateBusinessReportScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBusinessReportScheduleResponse { ///

The ARN of the business report schedule.

#[serde(rename = "ScheduleArn")] @@ -475,7 +490,7 @@ pub struct CreateConferenceProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConferenceProviderResponse { ///

The ARN of the newly-created conference provider.

#[serde(rename = "ConferenceProviderArn")] @@ -500,14 +515,22 @@ pub struct CreateContactRequest { #[serde(rename = "LastName")] #[serde(skip_serializing_if = "Option::is_none")] pub last_name: Option, - ///

The phone number of the contact in E.164 format.

+ ///

The phone number of the contact in E.164 format. The phone number type defaults to WORK. You can specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

#[serde(rename = "PhoneNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub phone_number: Option, + ///

The list of phone numbers for the contact.

+ #[serde(rename = "PhoneNumbers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub phone_numbers: Option>, + ///

The list of SIP addresses for the contact.

+ #[serde(rename = "SipAddresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sip_addresses: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateContactResponse { ///

The ARN of the newly created address book.

#[serde(rename = "ContactArn")] @@ -530,7 +553,7 @@ pub struct CreateGatewayGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGatewayGroupResponse { ///

The ARN of the created gateway group.

#[serde(rename = "GatewayGroupArn")] @@ -578,7 +601,7 @@ pub struct CreateNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNetworkProfileResponse { ///

The ARN of the network profile associated with a device.

#[serde(rename = "NetworkProfileArn")] @@ -598,6 +621,10 @@ pub struct CreateProfileRequest { ///

The distance unit to be used by devices in the profile.

#[serde(rename = "DistanceUnit")] pub distance_unit: String, + ///

The locale of the room profile.

+ #[serde(rename = "Locale")] + #[serde(skip_serializing_if = "Option::is_none")] + pub locale: Option, ///

The maximum volume limit for a room profile.

#[serde(rename = "MaxVolumeLimit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -625,7 +652,7 @@ pub struct CreateProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProfileResponse { ///

The ARN of the newly created room profile in the response.

#[serde(rename = "ProfileArn")] @@ -661,7 +688,7 @@ pub struct CreateRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRoomResponse { ///

The ARN of the newly created room in the response.

#[serde(rename = "RoomArn")] @@ -685,7 +712,7 @@ pub struct CreateSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSkillGroupResponse { ///

The ARN of the newly created skill group in the response.

#[serde(rename = "SkillGroupArn")] @@ -721,7 +748,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResponse { ///

The ARN of the newly created user in the response.

#[serde(rename = "UserArn")] @@ -737,7 +764,7 @@ pub struct DeleteAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAddressBookResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -748,7 +775,7 @@ pub struct DeleteBusinessReportScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBusinessReportScheduleResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -759,7 +786,7 @@ pub struct DeleteConferenceProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteConferenceProviderResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -770,7 +797,7 @@ pub struct DeleteContactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteContactResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -781,7 +808,7 @@ pub struct DeleteDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDeviceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -795,7 +822,7 @@ pub struct DeleteDeviceUsageDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDeviceUsageDataResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -806,7 +833,7 @@ pub struct DeleteGatewayGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGatewayGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -817,7 +844,7 @@ pub struct DeleteNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteNetworkProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -829,7 +856,7 @@ pub struct DeleteProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -841,7 +868,7 @@ pub struct DeleteRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRoomResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -859,7 +886,7 @@ pub struct DeleteRoomSkillParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct 
DeleteRoomSkillParameterResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -874,7 +901,7 @@ pub struct DeleteSkillAuthorizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSkillAuthorizationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -886,7 +913,7 @@ pub struct DeleteSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSkillGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -901,12 +928,12 @@ pub struct DeleteUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserResponse {} ///

The details about the developer that published the skill.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeveloperInfo { ///

The name of the developer.

#[serde(rename = "DeveloperName")] @@ -928,7 +955,7 @@ pub struct DeveloperInfo { ///

A device with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Device { ///

The ARN of a device.

#[serde(rename = "DeviceArn")] @@ -974,7 +1001,7 @@ pub struct Device { ///

Device attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceData { ///

The ARN of a device.

#[serde(rename = "DeviceArn")] @@ -1028,7 +1055,7 @@ pub struct DeviceData { ///

The list of device events.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceEvent { ///

The time (in epoch) when the event occurred.

#[serde(rename = "Timestamp")] @@ -1046,7 +1073,7 @@ pub struct DeviceEvent { ///

Detailed information about a device's network profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceNetworkProfileInfo { ///

The ARN of the certificate associated with a device.

#[serde(rename = "CertificateArn")] @@ -1064,7 +1091,7 @@ pub struct DeviceNetworkProfileInfo { ///

Details of a device’s status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceStatusDetail { ///

The device status detail code.

#[serde(rename = "Code")] @@ -1078,7 +1105,7 @@ pub struct DeviceStatusDetail { ///

Detailed information about a device's status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceStatusInfo { ///

The latest available information about the connection status of a device.

#[serde(rename = "ConnectionStatus")] @@ -1101,7 +1128,7 @@ pub struct DisassociateContactFromAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateContactFromAddressBookResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1113,7 +1140,7 @@ pub struct DisassociateDeviceFromRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDeviceFromRoomResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1128,7 +1155,7 @@ pub struct DisassociateSkillFromSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateSkillFromSkillGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1139,7 +1166,7 @@ pub struct DisassociateSkillFromUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateSkillFromUsersResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1155,7 +1182,7 @@ pub struct DisassociateSkillGroupFromRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateSkillGroupFromRoomResponse {} ///

A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.
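As a rough illustration of how such a filter is used (the `Filter` struct body is elided from this diff, so the `key`/`values` field names are an assumption):

```rust
// Hypothetical: narrow a device search to a single room.
let filter = Filter {
    key: "RoomName".to_string(),           // assumed filter key
    values: vec!["Boardroom".to_string()], // assumed field name
};
```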

@@ -1177,12 +1204,12 @@ pub struct ForgetSmartHomeAppliancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ForgetSmartHomeAppliancesResponse {} ///

The details of the gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Gateway { ///

The ARN of the gateway.

#[serde(rename = "Arn")] @@ -1208,7 +1235,7 @@ pub struct Gateway { ///

The details of the gateway group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GatewayGroup { ///

The ARN of the gateway group.

#[serde(rename = "Arn")] @@ -1226,7 +1253,7 @@ pub struct GatewayGroup { ///

The summary of a gateway group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GatewayGroupSummary { ///

The ARN of the gateway group.

#[serde(rename = "Arn")] @@ -1244,7 +1271,7 @@ pub struct GatewayGroupSummary { ///

The summary of a gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GatewaySummary { ///

The ARN of the gateway.

#[serde(rename = "Arn")] @@ -1276,7 +1303,7 @@ pub struct GetAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAddressBookResponse { ///

The details of the requested address book.

#[serde(rename = "AddressBook")] @@ -1288,7 +1315,7 @@ pub struct GetAddressBookResponse { pub struct GetConferencePreferenceRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConferencePreferenceResponse { ///

The conference preference.

#[serde(rename = "Preference")] @@ -1304,7 +1331,7 @@ pub struct GetConferenceProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConferenceProviderResponse { ///

The conference provider.

#[serde(rename = "ConferenceProvider")] @@ -1320,7 +1347,7 @@ pub struct GetContactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetContactResponse { ///

The details of the requested contact.

#[serde(rename = "Contact")] @@ -1337,7 +1364,7 @@ pub struct GetDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeviceResponse { ///

The details of the device requested. Required.

#[serde(rename = "Device")] @@ -1353,7 +1380,7 @@ pub struct GetGatewayGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGatewayGroupResponse { #[serde(rename = "GatewayGroup")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1368,7 +1395,7 @@ pub struct GetGatewayRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGatewayResponse { ///

The details of the gateway.

#[serde(rename = "Gateway")] @@ -1380,7 +1407,7 @@ pub struct GetGatewayResponse { pub struct GetInvitationConfigurationRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInvitationConfigurationResponse { ///

The email ID of the organization or individual contact that the enrolled user can use.

#[serde(rename = "ContactEmail")] @@ -1404,7 +1431,7 @@ pub struct GetNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetNetworkProfileResponse { ///

The network profile associated with a device.

#[serde(rename = "NetworkProfile")] @@ -1421,7 +1448,7 @@ pub struct GetProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetProfileResponse { ///

The details of the room profile requested. Required.

#[serde(rename = "Profile")] @@ -1438,7 +1465,7 @@ pub struct GetRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRoomResponse { ///

The details of the room requested.

#[serde(rename = "Room")] @@ -1461,7 +1488,7 @@ pub struct GetRoomSkillParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRoomSkillParameterResponse { ///

The details of the room skill parameter requested. Required.

#[serde(rename = "RoomSkillParameter")] @@ -1478,7 +1505,7 @@ pub struct GetSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSkillGroupResponse { ///

The details of the skill group requested. Required.

#[serde(rename = "SkillGroup")] @@ -1510,7 +1537,7 @@ pub struct ListBusinessReportSchedulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBusinessReportSchedulesResponse { ///

The schedule of the reports.

#[serde(rename = "BusinessReportSchedules")] @@ -1535,7 +1562,7 @@ pub struct ListConferenceProvidersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConferenceProvidersResponse { ///

The conference providers.

#[serde(rename = "ConferenceProviders")] @@ -1567,7 +1594,7 @@ pub struct ListDeviceEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeviceEventsResponse { ///

The device events requested for the device ARN.

#[serde(rename = "DeviceEvents")] @@ -1592,7 +1619,7 @@ pub struct ListGatewayGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGatewayGroupsResponse { ///

The gateway groups in the list.

#[serde(rename = "GatewayGroups")] @@ -1621,7 +1648,7 @@ pub struct ListGatewaysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGatewaysResponse { ///

The gateways in the list.

#[serde(rename = "Gateways")] @@ -1658,7 +1685,7 @@ pub struct ListSkillsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSkillsResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -1683,7 +1710,7 @@ pub struct ListSkillsStoreCategoriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSkillsStoreCategoriesResponse { ///

The list of categories.

#[serde(rename = "CategoryList")] @@ -1711,7 +1738,7 @@ pub struct ListSkillsStoreSkillsByCategoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSkillsStoreSkillsByCategoryResponse { ///

The tokens used for pagination.

#[serde(rename = "NextToken")] @@ -1739,7 +1766,7 @@ pub struct ListSmartHomeAppliancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSmartHomeAppliancesResponse { ///

The tokens used for pagination.

#[serde(rename = "NextToken")] @@ -1767,7 +1794,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -1789,7 +1816,7 @@ pub struct MeetingSetting { ///

The network profile associated with a device.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkProfile { ///

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

#[serde(rename = "CertificateAuthorityArn")] @@ -1835,7 +1862,7 @@ pub struct NetworkProfile { ///

The data associated with a network profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkProfileData { ///

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

 #[serde(rename = "CertificateAuthorityArn")]
@@ -1884,9 +1911,20 @@ pub struct PSTNDialIn {
     pub phone_number: String,
 }
 
+/// The phone number for the contact containing the raw number and phone number type.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct PhoneNumber {
+    /// The raw value of the phone number.
+    #[serde(rename = "Number")]
+    pub number: String,
+    /// The type of the phone number.
+    #[serde(rename = "Type")]
+    pub type_: String,
+}
+
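A short sketch of the recommended `PhoneNumbers` usage described in the doc comments above; field names come from the `CreateContactRequest` hunks earlier in this diff, and `"WORK"` is the documented default type (the `Default` derive on the request struct is assumed from the crate's usual codegen):

```rust
// Prefer the PhoneNumbers list over the legacy single PhoneNumber field.
let request = CreateContactRequest {
    first_name: "Jane".to_string(),
    phone_numbers: Some(vec![PhoneNumber {
        number: "+15555550100".to_string(),
        type_: "WORK".to_string(), // the documented default phone number type
    }]),
    ..Default::default()
};
```

 ///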

A room profile with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Profile { ///

The address of a room profile.

#[serde(rename = "Address")] @@ -1904,6 +1942,10 @@ pub struct Profile { #[serde(rename = "IsDefault")] #[serde(skip_serializing_if = "Option::is_none")] pub is_default: Option, + ///

The locale of a room profile.

+ #[serde(rename = "Locale")] + #[serde(skip_serializing_if = "Option::is_none")] + pub locale: Option, ///

The max volume limit of a room profile.

#[serde(rename = "MaxVolumeLimit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1940,7 +1982,7 @@ pub struct Profile { ///

The data of a room profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProfileData { ///

The address of a room profile.

#[serde(rename = "Address")] @@ -1954,6 +1996,10 @@ pub struct ProfileData { #[serde(rename = "IsDefault")] #[serde(skip_serializing_if = "Option::is_none")] pub is_default: Option, + ///

The locale of a room profile.

+ #[serde(rename = "Locale")] + #[serde(skip_serializing_if = "Option::is_none")] + pub locale: Option, ///

The ARN of a room profile.

#[serde(rename = "ProfileArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1984,7 +2030,7 @@ pub struct PutConferencePreferenceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutConferencePreferenceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2003,7 +2049,7 @@ pub struct PutInvitationConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutInvitationConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2021,7 +2067,7 @@ pub struct PutRoomSkillParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRoomSkillParameterResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2039,7 +2085,7 @@ pub struct PutSkillAuthorizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutSkillAuthorizationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2062,7 +2108,7 @@ pub struct RegisterAVSDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterAVSDeviceResponse { ///

The ARN of the device.

#[serde(rename = "DeviceArn")] @@ -2078,7 +2124,7 @@ pub struct RejectSkillRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectSkillResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2092,7 +2138,7 @@ pub struct ResolveRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResolveRoomResponse { ///

The ARN of the room from which the skill request was invoked.

#[serde(rename = "RoomArn")] @@ -2121,12 +2167,12 @@ pub struct RevokeInvitationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RevokeInvitationResponse {} ///

A room with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Room { ///

The description of a room.

#[serde(rename = "Description")] @@ -2152,7 +2198,7 @@ pub struct Room { ///

The data of a room.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RoomData { ///

The description of a room.

#[serde(rename = "Description")] @@ -2212,7 +2258,7 @@ pub struct SearchAddressBooksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchAddressBooksResponse { ///

The address books that meet the specified set of filter criteria, in sort order.

#[serde(rename = "AddressBooks")] @@ -2249,7 +2295,7 @@ pub struct SearchContactsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchContactsResponse { ///

The contacts that meet the specified set of filter criteria, in sort order.

#[serde(rename = "Contacts")] @@ -2286,7 +2332,7 @@ pub struct SearchDevicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchDevicesResponse { ///

The devices that meet the specified set of filter criteria, in sort order.

#[serde(rename = "Devices")] @@ -2323,7 +2369,7 @@ pub struct SearchNetworkProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchNetworkProfilesResponse { ///

The network profiles that meet the specified set of filter criteria, in sort order. It is a list of NetworkProfileData objects.

#[serde(rename = "NetworkProfiles")] @@ -2360,7 +2406,7 @@ pub struct SearchProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchProfilesResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -2397,7 +2443,7 @@ pub struct SearchRoomsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchRoomsResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -2434,7 +2480,7 @@ pub struct SearchSkillGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchSkillGroupsResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -2471,7 +2517,7 @@ pub struct SearchUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchUsersResponse { ///

The token returned to indicate that there is more data available.

#[serde(rename = "NextToken")] @@ -2505,7 +2551,7 @@ pub struct SendAnnouncementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendAnnouncementResponse { ///

The identifier of the announcement.

 #[serde(rename = "AnnouncementArn")]
@@ -2522,12 +2568,23 @@ pub struct SendInvitationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SendInvitationResponse {}
 
+/// The SIP address for the contact containing the URI and SIP address type.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SipAddress {
+    /// The type of the SIP address.
+    #[serde(rename = "Type")]
+    pub type_: String,
+    /// The URI for the SIP address.
+    #[serde(rename = "Uri")]
+    pub uri: String,
+}
+
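And the companion sketch for the new SIP list on an update; the ARN is illustrative and the `"WORK"` type value is an assumption mirroring the phone number default:

```rust
// Hypothetical: attach a SIP address while updating an existing contact.
let request = UpdateContactRequest {
    contact_arn: "arn:aws:a4b:us-east-1:123456789012:contact/example".to_string(),
    sip_addresses: Some(vec![SipAddress {
        type_: "WORK".to_string(),
        uri: "sip:jane@example.com".to_string(),
    }]),
    ..Default::default()
};
```

 ///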

Granular information about the skill.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SkillDetails { ///

The details about what the skill supports organized as bullet points.

#[serde(rename = "BulletPoints")] @@ -2573,7 +2630,7 @@ pub struct SkillDetails { ///

A skill group with attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SkillGroup { ///

The description of a skill group.

#[serde(rename = "Description")] @@ -2591,7 +2648,7 @@ pub struct SkillGroup { ///

The attributes of a skill group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SkillGroupData { ///

The description of a skill group.

#[serde(rename = "Description")] @@ -2609,7 +2666,7 @@ pub struct SkillGroupData { ///

The summary of skills.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SkillSummary { ///

Whether the skill is enabled under the user's account, or if it requires linking to be used.

#[serde(rename = "EnablementType")] @@ -2635,7 +2692,7 @@ pub struct SkillSummary { ///

The detailed information about an Alexa skill.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SkillsStoreSkill { ///

The URL where the skill icon resides.

#[serde(rename = "IconUrl")] @@ -2669,7 +2726,7 @@ pub struct SkillsStoreSkill { ///

A smart home appliance that can connect to a central system. Any domestic device can be a smart appliance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SmartHomeAppliance { ///

The description of the smart home appliance.

#[serde(rename = "Description")] @@ -2723,7 +2780,7 @@ pub struct StartDeviceSyncRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartDeviceSyncResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2734,7 +2791,7 @@ pub struct StartSmartHomeApplianceDiscoveryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSmartHomeApplianceDiscoveryResponse {} ///

A key-value pair that can be associated with a resource.

@@ -2759,7 +2816,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

The text message.

@@ -2784,7 +2841,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2803,7 +2860,7 @@ pub struct UpdateAddressBookRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAddressBookResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2834,7 +2891,7 @@ pub struct UpdateBusinessReportScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBusinessReportScheduleResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2859,7 +2916,7 @@ pub struct UpdateConferenceProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConferenceProviderResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2879,14 +2936,22 @@ pub struct UpdateContactRequest { #[serde(rename = "LastName")] #[serde(skip_serializing_if = "Option::is_none")] pub last_name: Option, - ///

The updated phone number of the contact.

+ ///

The updated phone number of the contact. The phone number type defaults to WORK. You can either specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

#[serde(rename = "PhoneNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub phone_number: Option, + ///

The list of phone numbers for the contact.

+ #[serde(rename = "PhoneNumbers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub phone_numbers: Option>, + ///

The list of SIP addresses for the contact.

+ #[serde(rename = "SipAddresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sip_addresses: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateContactResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2902,7 +2967,7 @@ pub struct UpdateDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeviceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2921,7 +2986,7 @@ pub struct UpdateGatewayGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGatewayGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2944,7 +3009,7 @@ pub struct UpdateGatewayRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGatewayResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2979,7 +3044,7 @@ pub struct UpdateNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNetworkProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2996,6 +3061,10 @@ pub struct UpdateProfileRequest { #[serde(rename = "IsDefault")] #[serde(skip_serializing_if = "Option::is_none")] pub is_default: Option, + ///

The updated locale for the room profile.

+ #[serde(rename = "Locale")] + #[serde(skip_serializing_if = "Option::is_none")] + pub locale: Option, ///

The updated maximum volume limit for the room profile.

#[serde(rename = "MaxVolumeLimit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -3031,7 +3100,7 @@ pub struct UpdateProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3059,7 +3128,7 @@ pub struct UpdateRoomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRoomResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3079,12 +3148,12 @@ pub struct UpdateSkillGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSkillGroupResponse {} ///

Information related to a user.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserData { ///

The email of a user.

#[serde(rename = "Email")] @@ -6734,7 +6803,7 @@ pub trait AlexaForBusiness { input: DeleteDeviceRequest, ) -> RusotoFuture; - ///

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data. This action can be called once every 24 hours for a specific shared device.

+ ///

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

 fn delete_device_usage_data(
         &self,
         input: DeleteDeviceUsageDataRequest,
     ) -> RusotoFuture<DeleteDeviceUsageDataResponse, DeleteDeviceUsageDataError>;
@@ -7059,7 +7128,7 @@ pub trait AlexaForBusiness {
         input: SendAnnouncementRequest,
     ) -> RusotoFuture<SendAnnouncementResponse, SendAnnouncementError>;
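Because the voice-data deletion above is accepted only once every 24 hours per shared device, a caller would typically issue it as a one-shot request. A hedged sketch; the request fields follow rusoto's snake_case mapping of the AWS `DeviceArn` and `DeviceUsageType` members, and `"VOICE"` is the usage type named in the AWS docs, none of which is shown in this hunk:

```rust
// Hypothetical one-shot call; `client` is an AlexaForBusinessClient.
let request = DeleteDeviceUsageDataRequest {
    device_arn: "arn:aws:a4b:us-east-1:123456789012:device/example".to_string(),
    device_usage_type: "VOICE".to_string(),
};
// .sync() blocks on the returned RusotoFuture.
client
    .delete_device_usage_data(request)
    .sync()
    .expect("usage data deletion was not accepted");
```

-    ///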

Sends an enrollment invitation email with a URL to a user. The URL is valid for 72 hours or until you call this operation again, whichever comes first.

+ ///

Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or until you call this operation again, whichever comes first.

 fn send_invitation(
         &self,
         input: SendInvitationRequest,
@@ -7167,10 +7236,7 @@ impl AlexaForBusinessClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> AlexaForBusinessClient {
-        AlexaForBusinessClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -7184,10 +7250,14 @@ impl AlexaForBusinessClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        AlexaForBusinessClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AlexaForBusinessClient {
+        AlexaForBusinessClient { client, region }
+    }
 }
 
@@ -7825,7 +7895,7 @@ impl AlexaForBusiness for AlexaForBusinessClient {
         })
     }
 
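The constructors above now all route through `new_with_client`, which additionally lets callers inject a pre-built `rusoto_core::Client`, e.g. to share one HTTP connection pool across several service clients. A brief sketch (region choice is illustrative):

```rust
use rusoto_core::{Client, Region};

// As before: default credentials provider and TLS client.
let a4b = AlexaForBusinessClient::new(Region::UsEast1);

// New in this change: reuse an existing shared Client.
let shared = Client::shared();
let a4b_shared = AlexaForBusinessClient::new_with_client(shared, Region::UsEast1);
```

-    ///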

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data. This action can be called once every 24 hours for a specific shared device.

+ ///

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

fn delete_device_usage_data( &self, input: DeleteDeviceUsageDataRequest, @@ -9390,7 +9460,7 @@ impl AlexaForBusiness for AlexaForBusinessClient { }) } - ///

Sends an enrollment invitation email with a URL to a user. The URL is valid for 72 hours or until you call this operation again, whichever comes first.

+ ///

Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or until you call this operation again, whichever comes first.

 fn send_invitation(
         &self,
         input: SendInvitationRequest,
diff --git a/rusoto/services/amplify/Cargo.toml b/rusoto/services/amplify/Cargo.toml
index 19648458335..29852247f51 100644
--- a/rusoto/services/amplify/Cargo.toml
+++ b/rusoto/services/amplify/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_amplify"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/amplify/README.md b/rusoto/services/amplify/README.md
index e402a9771d8..2b14952c3a0 100644
--- a/rusoto/services/amplify/README.md
+++ b/rusoto/services/amplify/README.md
@@ -23,9 +23,16 @@ To use `rusoto_amplify` in your application, add it as a dependency in your `Car
 
 ```toml
 [dependencies]
-rusoto_amplify = "0.40.0"
+rusoto_amplify = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/amplify/src/custom/mod.rs b/rusoto/services/amplify/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/amplify/src/custom/mod.rs
+++ b/rusoto/services/amplify/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/amplify/src/generated.rs b/rusoto/services/amplify/src/generated.rs
index 41983c43eda..7adf0f1379b 100644
--- a/rusoto/services/amplify/src/generated.rs
+++ b/rusoto/services/amplify/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest;
 use serde_json;
 
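The new `serialize_structs` feature flips the `cfg_attr` gates above so output structs derive `Serialize` outside of tests as well, letting consumers serialize responses for logging or snapshotting. A minimal sketch of enabling it from a downstream `Cargo.toml` (crate version as in this diff):

```toml
[dependencies.rusoto_amplify]
version = "0.41.0"
features = ["serialize_structs"]
```

 ///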

Amplify App represents different branches of a repository for building, deploying, and hosting.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct App { ///

ARN for the Amplify App.

#[serde(rename = "appArn")] @@ -35,6 +34,14 @@ pub struct App { ///

Unique Id for the Amplify App.

#[serde(rename = "appId")] pub app_id: String, + ///

Automated branch creation config for the Amplify App.

+ #[serde(rename = "autoBranchCreationConfig")] + #[serde(skip_serializing_if = "Option::is_none")] + pub auto_branch_creation_config: Option, + ///

Automated branch creation glob patterns for the Amplify App.

+ #[serde(rename = "autoBranchCreationPatterns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub auto_branch_creation_patterns: Option>, ///

Basic Authorization credentials for branches for the Amplify App.

#[serde(rename = "basicAuthCredentials")] #[serde(skip_serializing_if = "Option::is_none")] @@ -56,6 +63,10 @@ pub struct App { ///

Description for the Amplify App.

#[serde(rename = "description")] pub description: String, + ///

Enables automated branch creation for the Amplify App.

+ #[serde(rename = "enableAutoBranchCreation")] + #[serde(skip_serializing_if = "Option::is_none")] + pub enable_auto_branch_creation: Option, ///

Enables Basic Authorization for branches for the Amplify App.

 #[serde(rename = "enableBasicAuth")]
     pub enable_basic_auth: bool,
@@ -91,13 +102,50 @@ pub struct App {
     pub update_time: f64,
 }
 
+/// Structure with auto branch creation config.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AutoBranchCreationConfig {
+    /// Basic Authorization credentials for the auto created branch.
+    #[serde(rename = "basicAuthCredentials")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub basic_auth_credentials: Option<String>,
+    /// BuildSpec for the auto created branch.
+    #[serde(rename = "buildSpec")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub build_spec: Option<String>,
+    /// Enables auto building for the auto created branch.
+    #[serde(rename = "enableAutoBuild")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enable_auto_build: Option<bool>,
+    /// Enables Basic Auth for the auto created branch.
+    #[serde(rename = "enableBasicAuth")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enable_basic_auth: Option<bool>,
+    /// Environment Variables for the auto created branch.
+    #[serde(rename = "environmentVariables")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub environment_variables: Option<::std::collections::HashMap<String, String>>,
+    /// Framework for the auto created branch.
+    #[serde(rename = "framework")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub framework: Option<String>,
+    /// Stage for the auto created branch.
+    #[serde(rename = "stage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stage: Option<String>,
+}
+
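Tying the new fields together, a hedged sketch of creating an app that auto-creates and auto-builds branches matching a glob (all values illustrative; omitted fields fall back via the derived `Default`):

```rust
// Hypothetical: auto-create a branch for every ref matching "feature/*".
let auto_config = AutoBranchCreationConfig {
    enable_auto_build: Some(true),
    stage: Some("DEVELOPMENT".to_string()), // assumed stage value
    ..Default::default()
};
let request = CreateAppRequest {
    name: "my-app".to_string(),
    enable_auto_branch_creation: Some(true),
    auto_branch_creation_patterns: Some(vec!["feature/*".to_string()]),
    auto_branch_creation_config: Some(auto_config),
    ..Default::default()
};
```

 ///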

Branch for an Amplify App, which maps to a 3rd party repository branch.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Branch { ///

Id of the active job for a branch, part of an Amplify App.

#[serde(rename = "activeJobId")] pub active_job_id: String, + ///

List of custom resources that are linked to this branch.

+ #[serde(rename = "associatedResources")] + #[serde(skip_serializing_if = "Option::is_none")] + pub associated_resources: Option>, ///

Basic Authorization credentials for a branch, part of an Amplify App.

#[serde(rename = "basicAuthCredentials")] #[serde(skip_serializing_if = "Option::is_none")] @@ -121,10 +169,9 @@ pub struct Branch { ///

Description for a branch, part of an Amplify App.

#[serde(rename = "description")] pub description: String, - ///

Display name for a branch, part of an Amplify App.

+ ///

Display name for a branch, will use as the default domain prefix.

#[serde(rename = "displayName")] - #[serde(skip_serializing_if = "Option::is_none")] - pub display_name: Option, + pub display_name: String, ///

Enables auto-building on push for a branch, part of an Amplify App.

#[serde(rename = "enableAutoBuild")] pub enable_auto_build: bool, @@ -147,7 +194,7 @@ pub struct Branch { #[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, - ///

Thumbnail Url for the branch.

+ ///

Thumbnail URL for the branch.

#[serde(rename = "thumbnailUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub thumbnail_url: Option, @@ -165,6 +212,18 @@ pub struct Branch { ///

 /// <p>Request structure used to create Apps in Amplify.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateAppRequest {
+    /// <p>Personal Access token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. Token is not stored.</p>
+    #[serde(rename = "accessToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub access_token: Option<String>,
+    /// <p>Automated branch creation config for the Amplify App.</p>
+    #[serde(rename = "autoBranchCreationConfig")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auto_branch_creation_config: Option<AutoBranchCreationConfig>,
+    /// <p>Automated branch creation glob patterns for the Amplify App.</p>
+    #[serde(rename = "autoBranchCreationPatterns")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auto_branch_creation_patterns: Option<Vec<String>>,
     /// <p>Credentials for Basic Authorization for an Amplify App.</p>
     #[serde(rename = "basicAuthCredentials")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -181,6 +240,10 @@ pub struct CreateAppRequest {
     #[serde(rename = "description")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
+    /// <p>Enables automated branch creation for the Amplify App.</p>
+    #[serde(rename = "enableAutoBranchCreation")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enable_auto_branch_creation: Option<bool>,
     /// <p>Enable Basic Authorization for an Amplify App, this will apply to all branches part of this App.</p>
     #[serde(rename = "enableBasicAuth")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -202,13 +265,16 @@ pub struct CreateAppRequest {
     pub name: String,
     /// <p>OAuth token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. OAuth token is not stored.</p>
     #[serde(rename = "oauthToken")]
-    pub oauth_token: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub oauth_token: Option<String>,
     /// <p>Platform / framework for an Amplify App</p>
     #[serde(rename = "platform")]
-    pub platform: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub platform: Option<String>,
     /// <p>Repository for an Amplify App</p>
     #[serde(rename = "repository")]
-    pub repository: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub repository: Option<String>,
     /// <p>Tag for an Amplify App</p>
     #[serde(rename = "tags")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -216,7 +282,7 @@ pub struct CreateAppRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateAppResult {
     #[serde(rename = "app")]
     pub app: App,
@@ -243,6 +309,10 @@ pub struct CreateBranchRequest {
     #[serde(rename = "description")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
+    /// <p>Display name for a branch, will use as the default domain prefix.</p>
+    #[serde(rename = "displayName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub display_name: Option<String>,
     /// <p>Enables auto building for the branch.</p>
     #[serde(rename = "enableAutoBuild")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -279,13 +349,44 @@ pub struct CreateBranchRequest {
 /// <p>Result structure for create branch request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateBranchResult {
     /// <p>Branch structure for an Amplify App.</p>
     #[serde(rename = "branch")]
     pub branch: Branch,
 }

+/// <p>Request structure for create a new deployment.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateDeploymentRequest {
+    /// <p>Unique Id for an Amplify App.</p>
+    #[serde(rename = "appId")]
+    pub app_id: String,
+    /// <p>Name for the branch, for the Job.</p>
+    #[serde(rename = "branchName")]
+    pub branch_name: String,
+    /// <p>Optional file map that contains file name as the key and file content md5 hash as the value. If this argument is provided, the service will generate different upload url per file. Otherwise, the service will only generate a single upload url for the zipped files.</p>
+    #[serde(rename = "fileMap")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub file_map: Option<::std::collections::HashMap<String, String>>,
+}
+
+/// <p>Result structure for create a new deployment.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateDeploymentResult {
+    /// <p>When the fileMap argument is provided in the request, the fileUploadUrls will contain a map of file names to upload url.</p>
+    #[serde(rename = "fileUploadUrls")]
+    pub file_upload_urls: ::std::collections::HashMap<String, String>,
+    /// <p>The jobId for this deployment, will supply to start deployment api.</p>
+    #[serde(rename = "jobId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_id: Option<String>,
+    /// <p>When the fileMap argument is NOT provided. This zipUploadUrl will be returned.</p>
+    #[serde(rename = "zipUploadUrl")]
+    pub zip_upload_url: String,
+}
+

 /// <p>Request structure for create Domain Association request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateDomainAssociationRequest {
@@ -306,13 +407,37 @@ pub struct CreateDomainAssociationRequest {
 /// <p>Result structure for the create Domain Association request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateDomainAssociationResult {
     /// <p>Domain Association structure.</p>
     #[serde(rename = "domainAssociation")]
     pub domain_association: DomainAssociation,
 }

+/// <p>Request structure for create webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateWebhookRequest {
+    /// <p>Unique Id for an Amplify App.</p>
+    #[serde(rename = "appId")]
+    pub app_id: String,
+    /// <p>Name for a branch, part of an Amplify App.</p>
+    #[serde(rename = "branchName")]
+    pub branch_name: String,
+    /// <p>Description for a webhook.</p>
+    #[serde(rename = "description")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+}
+
+/// <p>Result structure for the create webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateWebhookResult {
+    /// <p>Webhook structure.</p>
+    #[serde(rename = "webhook")]
+    pub webhook: Webhook,
+}
+

 /// <p>Custom rewrite / redirect rule.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct CustomRule {
@@ -342,7 +467,7 @@ pub struct DeleteAppRequest {
 /// <p>Result structure for an Amplify App delete request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteAppResult {
     #[serde(rename = "app")]
     pub app: App,
@@ -361,7 +486,7 @@ pub struct DeleteBranchRequest {
 /// <p>Result structure for delete branch request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteBranchResult {
     /// <p>Branch structure for an Amplify App.</p>
     #[serde(rename = "branch")]
@@ -380,7 +505,7 @@ pub struct DeleteDomainAssociationRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteDomainAssociationResult {
     #[serde(rename = "domainAssociation")]
     pub domain_association: DomainAssociation,
@@ -402,19 +527,37 @@ pub struct DeleteJobRequest {
 /// <p>Result structure for the delete job request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteJobResult {
     #[serde(rename = "jobSummary")]
     pub job_summary: JobSummary,
 }

+/// <p>Request structure for the delete webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteWebhookRequest {
+    /// <p>Unique Id for a webhook.</p>
+    #[serde(rename = "webhookId")]
+    pub webhook_id: String,
+}
+
+/// <p>Result structure for the delete webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteWebhookResult {
+    /// <p>Webhook structure.</p>
+    #[serde(rename = "webhook")]
+    pub webhook: Webhook,
+}
+
 /// <p>Structure for Domain Association, which associates a custom domain with an Amplify App.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainAssociation {
     /// <p>DNS Record for certificate verification.</p>
     #[serde(rename = "certificateVerificationDNSRecord")]
-    pub certificate_verification_dns_record: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub certificate_verification_dns_record: Option<String>,
     /// <p>ARN for the Domain Association.</p>
     #[serde(rename = "domainAssociationArn")]
     pub domain_association_arn: String,
@@ -444,7 +587,7 @@ pub struct GetAppRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetAppResult {
     #[serde(rename = "app")]
     pub app: App,
@@ -462,7 +605,7 @@ pub struct GetBranchRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetBranchResult {
     #[serde(rename = "branch")]
     pub branch: Branch,
@@ -481,7 +624,7 @@ pub struct GetDomainAssociationRequest {
 /// <p>Result structure for the get Domain Association request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetDomainAssociationResult {
     /// <p>Domain Association structure.</p>
     #[serde(rename = "domainAssociation")]
@@ -503,15 +646,32 @@ pub struct GetJobRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetJobResult {
     #[serde(rename = "job")]
     pub job: Job,
 }

+/// <p>Request structure for the get webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetWebhookRequest {
+    /// <p>Unique Id for a webhook.</p>
+    #[serde(rename = "webhookId")]
+    pub webhook_id: String,
+}
+
+/// <p>Result structure for the get webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetWebhookResult {
+    /// <p>Webhook structure.</p>
+    #[serde(rename = "webhook")]
+    pub webhook: Webhook,
+}
+

 /// <p>Structure for an execution job for an Amplify App.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Job {
     /// <p>Execution steps for an execution job, for an Amplify App.</p>
     #[serde(rename = "steps")]
@@ -523,7 +683,7 @@ pub struct Job {
 /// <p>Structure for the summary of a Job.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct JobSummary {
     /// <p>Commit Id from 3rd party repository provider for the Job.</p>
     #[serde(rename = "commitId")]
@@ -544,7 +704,7 @@ pub struct JobSummary {
     /// <p>Unique Id for the Job.</p>
     #[serde(rename = "jobId")]
     pub job_id: String,
-    /// <p>Type for the Job.</p>
+    /// <p>Type for the Job. \n "RELEASE": Manually released from source by using StartJob API. "RETRY": Manually retried by using StartJob API. "WEB_HOOK": Automatically triggered by WebHooks.</p>
     #[serde(rename = "jobType")]
     pub job_type: String,
     /// <p>Start date / time for the Job.</p>
@@ -570,7 +730,7 @@ pub struct ListAppsRequest {
 /// <p>Result structure for an Amplify App list request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListAppsResult {
     /// <p>List of Amplify Apps.</p>
     #[serde(rename = "apps")]
@@ -599,7 +759,7 @@ pub struct ListBranchesRequest {
 /// <p>Result structure for list branches request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListBranchesResult {
     /// <p>List of branches for an Amplify App.</p>
     #[serde(rename = "branches")]
@@ -628,7 +788,7 @@ pub struct ListDomainAssociationsRequest {
 /// <p>Result structure for the list Domain Association request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListDomainAssociationsResult {
     /// <p>List of Domain Associations.</p>
     #[serde(rename = "domainAssociations")]
@@ -660,7 +820,7 @@ pub struct ListJobsRequest {
     /// <p>Maximum number of records to list in a single response.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListJobsResult {
     /// <p>Result structure for list job result request.</p>
     #[serde(rename = "jobSummaries")]
@@ -671,9 +831,56 @@ pub struct ListJobsResult {
     pub next_token: Option<String>,
 }

+/// <p>Request structure used to list tags for resource.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListTagsForResourceRequest {
+    /// <p>Resource arn used to list tags.</p>
+    #[serde(rename = "resourceArn")]
+    pub resource_arn: String,
+}
+
+/// <p>Response for list tags.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListTagsForResourceResponse {
+    /// <p>Tags result for response.</p>
+    #[serde(rename = "tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<::std::collections::HashMap<String, String>>,
+}
+
+/// <p>Request structure for the list webhooks request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListWebhooksRequest {
+    /// <p>Unique Id for an Amplify App.</p>
+    #[serde(rename = "appId")]
+    pub app_id: String,
+    /// <p>Maximum number of records to list in a single response.</p>
+    #[serde(rename = "maxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>Pagination token. Set to null to start listing webhooks from start. If non-null pagination token is returned in a result, then pass its value in here to list more webhooks.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+/// <p>Result structure for the list webhooks request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListWebhooksResult {
+    /// <p>Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>List of webhooks.</p>
+    #[serde(rename = "webhooks")]
+    pub webhooks: Vec<Webhook>,
+}

 /// <p>Structure with Production Branch information.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ProductionBranch {
     /// <p>Branch Name for Production Branch.</p>
     #[serde(rename = "branchName")]
@@ -687,12 +894,40 @@ pub struct ProductionBranch {
     #[serde(rename = "status")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub status: Option<String>,
-    /// <p>Thumbnail Url for Production Branch.</p>
+    /// <p>Thumbnail URL for Production Branch.</p>
     #[serde(rename = "thumbnailUrl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub thumbnail_url: Option<String>,
 }

+/// <p>Request structure for start a deployment.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct StartDeploymentRequest {
+    /// <p>Unique Id for an Amplify App.</p>
+    #[serde(rename = "appId")]
+    pub app_id: String,
+    /// <p>Name for the branch, for the Job.</p>
+    #[serde(rename = "branchName")]
+    pub branch_name: String,
+    /// <p>The job id for this deployment, generated by create deployment request.</p>
+    #[serde(rename = "jobId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_id: Option<String>,
+    /// <p>The sourceUrl for this deployment, used when calling start deployment without create deployment. SourceUrl can be any HTTP GET url that is public accessible and downloads a single zip.</p>
+    #[serde(rename = "sourceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_url: Option<String>,
+}
+
+/// <p>Result structure for start a deployment.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StartDeploymentResult {
+    /// <p>Summary for the Job.</p>
+    #[serde(rename = "jobSummary")]
+    pub job_summary: JobSummary,
+}

 /// <p>Request structure for Start job request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct StartJobRequest {
@@ -714,22 +949,22 @@ pub struct StartJobRequest {
     #[serde(rename = "commitTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub commit_time: Option<f64>,
-    /// <p>Unique Id for the Job.</p>
+    /// <p>Unique Id for an existing job. Required for "RETRY" JobType.</p>
     #[serde(rename = "jobId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub job_id: Option<String>,
-    /// <p>Reason for the Job.</p>
+    /// <p>Descriptive reason for starting this job.</p>
     #[serde(rename = "jobReason")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub job_reason: Option<String>,
-    /// <p>Type for the Job.</p>
+    /// <p>Type for the Job. Available JobTypes are: \n "RELEASE": Start a new job with the latest change from the specified branch. Only available for apps that have connected to a repository. "RETRY": Retry an existing job. JobId is required for this type of job.</p>
     #[serde(rename = "jobType")]
     pub job_type: String,
 }

 /// <p>Result structure for run job request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartJobResult {
     /// <p>Summary for the Job.</p>
     #[serde(rename = "jobSummary")]
@@ -738,20 +973,24 @@ pub struct StartJobResult {

 /// <p>Structure for an execution step for an execution job, for an Amplify App.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Step {
-    /// <p>Url to teh artifact for the execution step.</p>
+    /// <p>URL to the artifact for the execution step.</p>
     #[serde(rename = "artifactsUrl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub artifacts_url: Option<String>,
+    /// <p>The context for current step, will include build image if step is build.</p>
+    #[serde(rename = "context")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub context: Option<String>,
     /// <p>End date/ time of the execution step.</p>
     #[serde(rename = "endTime")]
     pub end_time: f64,
-    /// <p>Url to the logs for the execution step.</p>
+    /// <p>URL to the logs for the execution step.</p>
     #[serde(rename = "logUrl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub log_url: Option<String>,
-    /// <p>List of screenshot Urls for the execution step, if relevant.</p>
+    /// <p>List of screenshot URLs for the execution step, if relevant.</p>
     #[serde(rename = "screenshots")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub screenshots: Option<::std::collections::HashMap<String, String>>,
@@ -761,6 +1000,10 @@ pub struct Step {
     /// <p>Status of the execution step.</p>
     #[serde(rename = "status")]
     pub status: String,
+    /// <p>The reason for current step status.</p>
+    #[serde(rename = "statusReason")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status_reason: Option<String>,
     /// <p>Name of the execution step.</p>
     #[serde(rename = "stepName")]
     pub step_name: String,
@@ -782,7 +1025,7 @@ pub struct StopJobRequest {
 /// <p>Result structure for the stop job request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StopJobResult {
     /// <p>Summary for the Job.</p>
     #[serde(rename = "jobSummary")]
@@ -791,7 +1034,7 @@ pub struct StopJobResult {
 /// <p>Subdomain for the Domain Association.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SubDomain {
     /// <p>DNS record for the Subdomain.</p>
     #[serde(rename = "dnsRecord")]
@@ -815,12 +1058,52 @@ pub struct SubDomainSetting {
     pub prefix: String,
 }

+/// <p>Request structure used to tag resource.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct TagResourceRequest {
+    /// <p>Resource arn used to tag resource.</p>
+    #[serde(rename = "resourceArn")]
+    pub resource_arn: String,
+    /// <p>Tags used to tag resource.</p>
+    #[serde(rename = "tags")]
+    pub tags: ::std::collections::HashMap<String, String>,
+}
+
+/// <p>Response for tag resource.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct TagResourceResponse {}
+
+/// <p>Request structure used to untag resource.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UntagResourceRequest {
+    /// <p>Resource arn used to untag resource.</p>
+    #[serde(rename = "resourceArn")]
+    pub resource_arn: String,
+    /// <p>Tag keys used to untag resource.</p>
+    #[serde(rename = "tagKeys")]
+    pub tag_keys: Vec<String>,
+}
+
+/// <p>Response for untag resource.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UntagResourceResponse {}
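A sketch of the new tagging operations; the `tag_resource`/`untag_resource` client methods are assumed from the request/response types above:

```rust
use std::collections::HashMap;

// Sketch: tag an app by ARN, then remove the same tag key again.
fn retag(client: &AmplifyClient, app_arn: String) -> Result<(), Box<dyn std::error::Error>> {
    let mut tags = HashMap::new();
    tags.insert("team".to_string(), "frontend".to_string());
    client
        .tag_resource(TagResourceRequest {
            resource_arn: app_arn.clone(),
            tags,
        })
        .sync()?;
    client
        .untag_resource(UntagResourceRequest {
            resource_arn: app_arn,
            tag_keys: vec!["team".to_string()],
        })
        .sync()?;
    Ok(())
}
```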

 /// <p>Request structure for update App request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateAppRequest {
     /// <p>Unique Id for an Amplify App.</p>
     #[serde(rename = "appId")]
     pub app_id: String,
+    /// <p>Automated branch creation config for the Amplify App.</p>
+    #[serde(rename = "autoBranchCreationConfig")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auto_branch_creation_config: Option<AutoBranchCreationConfig>,
+    /// <p>Automated branch creation glob patterns for the Amplify App.</p>
+    #[serde(rename = "autoBranchCreationPatterns")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auto_branch_creation_patterns: Option<Vec<String>>,
     /// <p>Basic Authorization credentials for an Amplify App.</p>
     #[serde(rename = "basicAuthCredentials")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -837,6 +1120,10 @@ pub struct UpdateAppRequest {
     #[serde(rename = "description")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
+    /// <p>Enables automated branch creation for the Amplify App.</p>
+    #[serde(rename = "enableAutoBranchCreation")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enable_auto_branch_creation: Option<bool>,
     /// <p>Enables Basic Authorization for an Amplify App.</p>
     #[serde(rename = "enableBasicAuth")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -865,7 +1152,7 @@ pub struct UpdateAppRequest {
 /// <p>Result structure for an Amplify App update request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateAppResult {
     /// <p>App structure for the updated App.</p>
     #[serde(rename = "app")]
@@ -893,6 +1180,10 @@ pub struct UpdateBranchRequest {
     #[serde(rename = "description")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
+    /// <p>Display name for a branch, will use as the default domain prefix.</p>
+    #[serde(rename = "displayName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub display_name: Option<String>,
     /// <p>Enables auto building for the branch.</p>
     #[serde(rename = "enableAutoBuild")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -925,7 +1216,7 @@ pub struct UpdateBranchRequest {
 /// <p>Result structure for update branch request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateBranchResult {
     /// <p>Branch structure for an Amplify App.</p>
     #[serde(rename = "branch")]
@@ -952,13 +1243,65 @@ pub struct UpdateDomainAssociationRequest {
 /// <p>Result structure for the update Domain Association request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateDomainAssociationResult {
     /// <p>Domain Association structure.</p>
     #[serde(rename = "domainAssociation")]
     pub domain_association: DomainAssociation,
 }

+/// <p>Request structure for update webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UpdateWebhookRequest {
+    /// <p>Name for a branch, part of an Amplify App.</p>
+    #[serde(rename = "branchName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub branch_name: Option<String>,
+    /// <p>Description for a webhook.</p>
+    #[serde(rename = "description")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    /// <p>Unique Id for a webhook.</p>
+    #[serde(rename = "webhookId")]
+    pub webhook_id: String,
+}
+
+/// <p>Result structure for the update webhook request.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UpdateWebhookResult {
+    /// <p>Webhook structure.</p>
+    #[serde(rename = "webhook")]
+    pub webhook: Webhook,
+}
+

+/// <p>Structure for webhook, which associates a webhook with an Amplify App.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Webhook {
+    /// <p>Name for a branch, part of an Amplify App.</p>
+    #[serde(rename = "branchName")]
+    pub branch_name: String,
+    /// <p>Create date / time for a webhook.</p>
+    #[serde(rename = "createTime")]
+    pub create_time: f64,
+    /// <p>Description for a webhook.</p>
+    #[serde(rename = "description")]
+    pub description: String,
+    /// <p>Update date / time for a webhook.</p>
+    #[serde(rename = "updateTime")]
+    pub update_time: f64,
+    /// <p>ARN for the webhook.</p>
+    #[serde(rename = "webhookArn")]
+    pub webhook_arn: String,
+    /// <p>Id of the webhook.</p>
+    #[serde(rename = "webhookId")]
+    pub webhook_id: String,
+    /// <p>Url of the webhook.</p>
+    #[serde(rename = "webhookUrl")]
+    pub webhook_url: String,
+}
+
 /// Errors returned by CreateApp
 #[derive(Debug, PartialEq)]
 pub enum CreateAppError {
@@ -1081,6 +1424,57 @@ impl Error for CreateBranchError {
         }
     }
 }
+/// Errors returned by CreateDeployment
+#[derive(Debug, PartialEq)]
+pub enum CreateDeploymentError {

+    /// <p>Exception thrown when a request contains unexpected data.</p>
+    BadRequest(String),
+    /// <p>Exception thrown when the service fails to perform an operation due to an internal issue.</p>
+    InternalFailure(String),
+    /// <p>Exception thrown when a resource could not be created because of service limits.</p>
+    LimitExceeded(String),
+    /// <p>Exception thrown when an operation fails due to a lack of access.</p>
+    Unauthorized(String),
+}
+
+impl CreateDeploymentError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDeploymentError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(CreateDeploymentError::BadRequest(err.msg))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(CreateDeploymentError::InternalFailure(err.msg))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(CreateDeploymentError::LimitExceeded(err.msg))
+                }
+                "UnauthorizedException" => {
+                    return RusotoError::Service(CreateDeploymentError::Unauthorized(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateDeploymentError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateDeploymentError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateDeploymentError::BadRequest(ref cause) => cause,
+            CreateDeploymentError::InternalFailure(ref cause) => cause,
+            CreateDeploymentError::LimitExceeded(ref cause) => cause,
+            CreateDeploymentError::Unauthorized(ref cause) => cause,
+        }
+    }
+}
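A sketch of consuming the variants that `from_response` produces, here for a hypothetical `create_deployment` call (`client` and `request` as in the earlier sketches):

```rust
// Sketch: service-specific errors arrive wrapped in RusotoError::Service.
match client.create_deployment(request).sync() {
    Ok(result) => println!("created deployment job {:?}", result.job_id),
    Err(RusotoError::Service(CreateDeploymentError::LimitExceeded(msg))) => {
        eprintln!("service limit hit: {}", msg)
    }
    Err(RusotoError::Service(CreateDeploymentError::Unauthorized(msg))) => {
        eprintln!("missing permissions: {}", msg)
    }
    Err(other) => eprintln!("create_deployment failed: {}", other),
}
```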

 /// Errors returned by CreateDomainAssociation
 #[derive(Debug, PartialEq)]
 pub enum CreateDomainAssociationError {
@@ -1152,39 +1546,46 @@ impl Error for CreateDomainAssociationError {
         }
     }
 }
-/// Errors returned by DeleteApp
+/// Errors returned by CreateWebhook
 #[derive(Debug, PartialEq)]
-pub enum DeleteAppError {
+pub enum CreateWebhookError {
     /// <p>Exception thrown when a request contains unexpected data.</p>

BadRequest(String), ///

Exception thrown when an operation fails due to a dependent service throwing an exception.

DependentServiceFailure(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), + ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), ///

Exception thrown when an entity has not been found during an operation.

NotFound(String), ///

Exception thrown when an operation fails due to a lack of access.

Unauthorized(String), } -impl DeleteAppError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateWebhookError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(DeleteAppError::BadRequest(err.msg)) + return RusotoError::Service(CreateWebhookError::BadRequest(err.msg)) } "DependentServiceFailureException" => { - return RusotoError::Service(DeleteAppError::DependentServiceFailure(err.msg)) + return RusotoError::Service(CreateWebhookError::DependentServiceFailure( + err.msg, + )) } "InternalFailureException" => { - return RusotoError::Service(DeleteAppError::InternalFailure(err.msg)) + return RusotoError::Service(CreateWebhookError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(CreateWebhookError::LimitExceeded(err.msg)) } "NotFoundException" => { - return RusotoError::Service(DeleteAppError::NotFound(err.msg)) + return RusotoError::Service(CreateWebhookError::NotFound(err.msg)) } "UnauthorizedException" => { - return RusotoError::Service(DeleteAppError::Unauthorized(err.msg)) + return RusotoError::Service(CreateWebhookError::Unauthorized(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1193,25 +1594,26 @@ impl DeleteAppError { return RusotoError::Unknown(res); } } -impl fmt::Display for DeleteAppError { +impl fmt::Display for CreateWebhookError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteAppError { +impl Error for CreateWebhookError { fn description(&self) -> &str { match *self { - DeleteAppError::BadRequest(ref cause) => cause, - DeleteAppError::DependentServiceFailure(ref cause) => cause, - DeleteAppError::InternalFailure(ref cause) => cause, - DeleteAppError::NotFound(ref cause) => cause, - DeleteAppError::Unauthorized(ref cause) => cause, + CreateWebhookError::BadRequest(ref cause) => cause, + CreateWebhookError::DependentServiceFailure(ref cause) => cause, + CreateWebhookError::InternalFailure(ref cause) => cause, + CreateWebhookError::LimitExceeded(ref cause) => cause, + CreateWebhookError::NotFound(ref cause) => cause, + CreateWebhookError::Unauthorized(ref cause) => cause, } } } -/// Errors returned by DeleteBranch +/// Errors returned by DeleteApp #[derive(Debug, PartialEq)] -pub enum DeleteBranchError { +pub enum DeleteAppError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), ///

Exception thrown when an operation fails due to a dependent service throwing an exception.

@@ -1224,15 +1626,72 @@ pub enum DeleteBranchError { Unauthorized(String), } -impl DeleteBranchError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteAppError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(DeleteBranchError::BadRequest(err.msg)) + return RusotoError::Service(DeleteAppError::BadRequest(err.msg)) } "DependentServiceFailureException" => { - return RusotoError::Service(DeleteBranchError::DependentServiceFailure( + return RusotoError::Service(DeleteAppError::DependentServiceFailure(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(DeleteAppError::InternalFailure(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(DeleteAppError::NotFound(err.msg)) + } + "UnauthorizedException" => { + return RusotoError::Service(DeleteAppError::Unauthorized(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteAppError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteAppError { + fn description(&self) -> &str { + match *self { + DeleteAppError::BadRequest(ref cause) => cause, + DeleteAppError::DependentServiceFailure(ref cause) => cause, + DeleteAppError::InternalFailure(ref cause) => cause, + DeleteAppError::NotFound(ref cause) => cause, + DeleteAppError::Unauthorized(ref cause) => cause, + } + } +} +/// Errors returned by DeleteBranch +#[derive(Debug, PartialEq)] +pub enum DeleteBranchError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when an operation fails due to a dependent service throwing an exception.

+ DependentServiceFailure(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when an entity has not been found during an operation.

+ NotFound(String), + ///

Exception thrown when an operation fails due to a lack of access.

+ Unauthorized(String), +} + +impl DeleteBranchError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(DeleteBranchError::BadRequest(err.msg)) + } + "DependentServiceFailureException" => { + return RusotoError::Service(DeleteBranchError::DependentServiceFailure( err.msg, )) } @@ -1388,6 +1847,63 @@ impl Error for DeleteJobError { } } } +/// Errors returned by DeleteWebhook +#[derive(Debug, PartialEq)] +pub enum DeleteWebhookError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), + ///

Exception thrown when an entity has not been found during an operation.

+ NotFound(String), + ///

Exception thrown when an operation fails due to a lack of access.

+ Unauthorized(String), +} + +impl DeleteWebhookError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(DeleteWebhookError::BadRequest(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(DeleteWebhookError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(DeleteWebhookError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(DeleteWebhookError::NotFound(err.msg)) + } + "UnauthorizedException" => { + return RusotoError::Service(DeleteWebhookError::Unauthorized(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteWebhookError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteWebhookError { + fn description(&self) -> &str { + match *self { + DeleteWebhookError::BadRequest(ref cause) => cause, + DeleteWebhookError::InternalFailure(ref cause) => cause, + DeleteWebhookError::LimitExceeded(ref cause) => cause, + DeleteWebhookError::NotFound(ref cause) => cause, + DeleteWebhookError::Unauthorized(ref cause) => cause, + } + } +} /// Errors returned by GetApp #[derive(Debug, PartialEq)] pub enum GetAppError { @@ -1596,6 +2112,63 @@ impl Error for GetJobError { } } } +/// Errors returned by GetWebhook +#[derive(Debug, PartialEq)] +pub enum GetWebhookError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), + ///

Exception thrown when an entity has not been found during an operation.

+ NotFound(String), + ///

Exception thrown when an operation fails due to a lack of access.

+ Unauthorized(String), +} + +impl GetWebhookError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(GetWebhookError::BadRequest(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(GetWebhookError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(GetWebhookError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(GetWebhookError::NotFound(err.msg)) + } + "UnauthorizedException" => { + return RusotoError::Service(GetWebhookError::Unauthorized(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetWebhookError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetWebhookError { + fn description(&self) -> &str { + match *self { + GetWebhookError::BadRequest(ref cause) => cause, + GetWebhookError::InternalFailure(ref cause) => cause, + GetWebhookError::LimitExceeded(ref cause) => cause, + GetWebhookError::NotFound(ref cause) => cause, + GetWebhookError::Unauthorized(ref cause) => cause, + } + } +} /// Errors returned by ListApps #[derive(Debug, PartialEq)] pub enum ListAppsError { @@ -1784,39 +2357,31 @@ impl Error for ListJobsError { } } } -/// Errors returned by StartJob +/// Errors returned by ListTagsForResource #[derive(Debug, PartialEq)] -pub enum StartJobError { +pub enum ListTagsForResourceError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), - ///

Exception thrown when a resource could not be created because of service limits.

- LimitExceeded(String), - ///

Exception thrown when an entity has not been found during an operation.

- NotFound(String), - ///

Exception thrown when an operation fails due to a lack of access.

- Unauthorized(String), + ///

Exception thrown when an operation fails due to non-existent resource.

+ ResourceNotFound(String), } -impl StartJobError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(StartJobError::BadRequest(err.msg)) + return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg)) } "InternalFailureException" => { - return RusotoError::Service(StartJobError::InternalFailure(err.msg)) + return RusotoError::Service(ListTagsForResourceError::InternalFailure(err.msg)) } - "LimitExceededException" => { - return RusotoError::Service(StartJobError::LimitExceeded(err.msg)) - } - "NotFoundException" => { - return RusotoError::Service(StartJobError::NotFound(err.msg)) - } - "UnauthorizedException" => { - return RusotoError::Service(StartJobError::Unauthorized(err.msg)) + "ResourceNotFoundException" => { + return RusotoError::Service(ListTagsForResourceError::ResourceNotFound( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1825,55 +2390,48 @@ impl StartJobError { return RusotoError::Unknown(res); } } -impl fmt::Display for StartJobError { +impl fmt::Display for ListTagsForResourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for StartJobError { +impl Error for ListTagsForResourceError { fn description(&self) -> &str { match *self { - StartJobError::BadRequest(ref cause) => cause, - StartJobError::InternalFailure(ref cause) => cause, - StartJobError::LimitExceeded(ref cause) => cause, - StartJobError::NotFound(ref cause) => cause, - StartJobError::Unauthorized(ref cause) => cause, + ListTagsForResourceError::BadRequest(ref cause) => cause, + ListTagsForResourceError::InternalFailure(ref cause) => cause, + ListTagsForResourceError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by StopJob +/// Errors returned by ListWebhooks #[derive(Debug, PartialEq)] -pub enum StopJobError { +pub enum ListWebhooksError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), ///

Exception thrown when a resource could not be created because of service limits.

LimitExceeded(String), - ///

Exception thrown when an entity has not been found during an operation.

- NotFound(String), ///

Exception thrown when an operation fails due to a lack of access.

Unauthorized(String), } -impl StopJobError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ListWebhooksError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(StopJobError::BadRequest(err.msg)) + return RusotoError::Service(ListWebhooksError::BadRequest(err.msg)) } "InternalFailureException" => { - return RusotoError::Service(StopJobError::InternalFailure(err.msg)) + return RusotoError::Service(ListWebhooksError::InternalFailure(err.msg)) } "LimitExceededException" => { - return RusotoError::Service(StopJobError::LimitExceeded(err.msg)) - } - "NotFoundException" => { - return RusotoError::Service(StopJobError::NotFound(err.msg)) + return RusotoError::Service(ListWebhooksError::LimitExceeded(err.msg)) } "UnauthorizedException" => { - return RusotoError::Service(StopJobError::Unauthorized(err.msg)) + return RusotoError::Service(ListWebhooksError::Unauthorized(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1882,50 +2440,54 @@ impl StopJobError { return RusotoError::Unknown(res); } } -impl fmt::Display for StopJobError { +impl fmt::Display for ListWebhooksError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for StopJobError { +impl Error for ListWebhooksError { fn description(&self) -> &str { match *self { - StopJobError::BadRequest(ref cause) => cause, - StopJobError::InternalFailure(ref cause) => cause, - StopJobError::LimitExceeded(ref cause) => cause, - StopJobError::NotFound(ref cause) => cause, - StopJobError::Unauthorized(ref cause) => cause, + ListWebhooksError::BadRequest(ref cause) => cause, + ListWebhooksError::InternalFailure(ref cause) => cause, + ListWebhooksError::LimitExceeded(ref cause) => cause, + ListWebhooksError::Unauthorized(ref cause) => cause, } } } -/// Errors returned by UpdateApp +/// Errors returned by StartDeployment #[derive(Debug, PartialEq)] -pub enum UpdateAppError { +pub enum StartDeploymentError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), + ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), ///

Exception thrown when an entity has not been found during an operation.

NotFound(String), ///

Exception thrown when an operation fails due to a lack of access.

Unauthorized(String), } -impl UpdateAppError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl StartDeploymentError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(UpdateAppError::BadRequest(err.msg)) + return RusotoError::Service(StartDeploymentError::BadRequest(err.msg)) } "InternalFailureException" => { - return RusotoError::Service(UpdateAppError::InternalFailure(err.msg)) + return RusotoError::Service(StartDeploymentError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(StartDeploymentError::LimitExceeded(err.msg)) } "NotFoundException" => { - return RusotoError::Service(UpdateAppError::NotFound(err.msg)) + return RusotoError::Service(StartDeploymentError::NotFound(err.msg)) } "UnauthorizedException" => { - return RusotoError::Service(UpdateAppError::Unauthorized(err.msg)) + return RusotoError::Service(StartDeploymentError::Unauthorized(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1934,56 +2496,55 @@ impl UpdateAppError { return RusotoError::Unknown(res); } } -impl fmt::Display for UpdateAppError { +impl fmt::Display for StartDeploymentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for UpdateAppError { +impl Error for StartDeploymentError { fn description(&self) -> &str { match *self { - UpdateAppError::BadRequest(ref cause) => cause, - UpdateAppError::InternalFailure(ref cause) => cause, - UpdateAppError::NotFound(ref cause) => cause, - UpdateAppError::Unauthorized(ref cause) => cause, + StartDeploymentError::BadRequest(ref cause) => cause, + StartDeploymentError::InternalFailure(ref cause) => cause, + StartDeploymentError::LimitExceeded(ref cause) => cause, + StartDeploymentError::NotFound(ref cause) => cause, + StartDeploymentError::Unauthorized(ref cause) => cause, } } } -/// Errors returned by UpdateBranch +/// Errors returned by StartJob #[derive(Debug, PartialEq)] -pub enum UpdateBranchError { +pub enum StartJobError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), - ///

Exception thrown when an operation fails due to a dependent service throwing an exception.

- DependentServiceFailure(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), + ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), ///

Exception thrown when an entity has not been found during an operation.

NotFound(String), ///

Exception thrown when an operation fails due to a lack of access.

Unauthorized(String), } -impl UpdateBranchError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl StartJobError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "BadRequestException" => { - return RusotoError::Service(UpdateBranchError::BadRequest(err.msg)) - } - "DependentServiceFailureException" => { - return RusotoError::Service(UpdateBranchError::DependentServiceFailure( - err.msg, - )) + return RusotoError::Service(StartJobError::BadRequest(err.msg)) } "InternalFailureException" => { - return RusotoError::Service(UpdateBranchError::InternalFailure(err.msg)) + return RusotoError::Service(StartJobError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(StartJobError::LimitExceeded(err.msg)) } "NotFoundException" => { - return RusotoError::Service(UpdateBranchError::NotFound(err.msg)) + return RusotoError::Service(StartJobError::NotFound(err.msg)) } "UnauthorizedException" => { - return RusotoError::Service(UpdateBranchError::Unauthorized(err.msg)) + return RusotoError::Service(StartJobError::Unauthorized(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1992,32 +2553,289 @@ impl UpdateBranchError { return RusotoError::Unknown(res); } } -impl fmt::Display for UpdateBranchError { +impl fmt::Display for StartJobError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for UpdateBranchError { +impl Error for StartJobError { fn description(&self) -> &str { match *self { - UpdateBranchError::BadRequest(ref cause) => cause, - UpdateBranchError::DependentServiceFailure(ref cause) => cause, - UpdateBranchError::InternalFailure(ref cause) => cause, - UpdateBranchError::NotFound(ref cause) => cause, - UpdateBranchError::Unauthorized(ref cause) => cause, + StartJobError::BadRequest(ref cause) => cause, + StartJobError::InternalFailure(ref cause) => cause, + StartJobError::LimitExceeded(ref cause) => cause, + StartJobError::NotFound(ref cause) => cause, + StartJobError::Unauthorized(ref cause) => cause, } } } -/// Errors returned by UpdateDomainAssociation +/// Errors returned by StopJob #[derive(Debug, PartialEq)] -pub enum UpdateDomainAssociationError { +pub enum StopJobError { ///

Exception thrown when a request contains unexpected data.

BadRequest(String), - ///

Exception thrown when an operation fails due to a dependent service throwing an exception.

- DependentServiceFailure(String), ///

Exception thrown when the service fails to perform an operation due to an internal issue.

InternalFailure(String), - ///

Exception thrown when an entity has not been found during an operation.

+ ///

Exception thrown when a resource could not be created because of service limits.

+ LimitExceeded(String), + ///

Exception thrown when an entity has not been found during an operation.

+ NotFound(String), + ///

Exception thrown when an operation fails due to a lack of access.

+ Unauthorized(String), +} + +impl StopJobError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(StopJobError::BadRequest(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(StopJobError::InternalFailure(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(StopJobError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(StopJobError::NotFound(err.msg)) + } + "UnauthorizedException" => { + return RusotoError::Service(StopJobError::Unauthorized(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StopJobError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StopJobError { + fn description(&self) -> &str { + match *self { + StopJobError::BadRequest(ref cause) => cause, + StopJobError::InternalFailure(ref cause) => cause, + StopJobError::LimitExceeded(ref cause) => cause, + StopJobError::NotFound(ref cause) => cause, + StopJobError::Unauthorized(ref cause) => cause, + } + } +} +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when an operation fails due to non-existent resource.

+ ResourceNotFound(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(TagResourceError::BadRequest(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(TagResourceError::InternalFailure(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(TagResourceError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::BadRequest(ref cause) => cause, + TagResourceError::InternalFailure(ref cause) => cause, + TagResourceError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when an operation fails due to non-existent resource.

+ ResourceNotFound(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UntagResourceError::BadRequest(err.msg)) + } + "InternalFailureException" => { + return RusotoError::Service(UntagResourceError::InternalFailure(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(UntagResourceError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::BadRequest(ref cause) => cause, + UntagResourceError::InternalFailure(ref cause) => cause, + UntagResourceError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by UpdateApp +#[derive(Debug, PartialEq)] +pub enum UpdateAppError { + ///

Exception thrown when a request contains unexpected data.

+ BadRequest(String), + ///

Exception thrown when the service fails to perform an operation due to an internal issue.

+ InternalFailure(String), + ///

Exception thrown when an entity has not been found during an operation.

+ NotFound(String), + ///

Exception thrown when an operation fails due to a lack of access.

+    Unauthorized(String),
+}
+
+impl UpdateAppError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateAppError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(UpdateAppError::BadRequest(err.msg))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(UpdateAppError::InternalFailure(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(UpdateAppError::NotFound(err.msg))
+                }
+                "UnauthorizedException" => {
+                    return RusotoError::Service(UpdateAppError::Unauthorized(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateAppError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateAppError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateAppError::BadRequest(ref cause) => cause,
+            UpdateAppError::InternalFailure(ref cause) => cause,
+            UpdateAppError::NotFound(ref cause) => cause,
+            UpdateAppError::Unauthorized(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateBranch
+#[derive(Debug, PartialEq)]
+pub enum UpdateBranchError {
+    /// <p> Exception thrown when a request contains unexpected data. </p>
+    BadRequest(String),
+    /// <p> Exception thrown when an operation fails due to a dependent service throwing an exception. </p>
+    DependentServiceFailure(String),
+    /// <p> Exception thrown when the service fails to perform an operation due to an internal issue. </p>
+    InternalFailure(String),
+    /// <p> Exception thrown when an entity has not been found during an operation. </p>
+    NotFound(String),
+    /// <p> Exception thrown when an operation fails due to a lack of access. </p>
+    Unauthorized(String),
+}
+
+impl UpdateBranchError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateBranchError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(UpdateBranchError::BadRequest(err.msg))
+                }
+                "DependentServiceFailureException" => {
+                    return RusotoError::Service(UpdateBranchError::DependentServiceFailure(
+                        err.msg,
+                    ))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(UpdateBranchError::InternalFailure(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(UpdateBranchError::NotFound(err.msg))
+                }
+                "UnauthorizedException" => {
+                    return RusotoError::Service(UpdateBranchError::Unauthorized(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateBranchError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateBranchError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateBranchError::BadRequest(ref cause) => cause,
+            UpdateBranchError::DependentServiceFailure(ref cause) => cause,
+            UpdateBranchError::InternalFailure(ref cause) => cause,
+            UpdateBranchError::NotFound(ref cause) => cause,
+            UpdateBranchError::Unauthorized(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateDomainAssociation
+#[derive(Debug, PartialEq)]
+pub enum UpdateDomainAssociationError {
+    /// <p> Exception thrown when a request contains unexpected data. </p>
+    BadRequest(String),
+    /// <p> Exception thrown when an operation fails due to a dependent service throwing an exception. </p>
+    DependentServiceFailure(String),
+    /// <p> Exception thrown when the service fails to perform an operation due to an internal issue. </p>
+    InternalFailure(String),
+    /// <p> Exception thrown when an entity has not been found during an operation. </p>
     NotFound(String),
     /// <p> Exception thrown when an operation fails due to a lack of access. </p>
     Unauthorized(String),
@@ -2071,6 +2889,65 @@ impl Error for UpdateDomainAssociationError {
         }
     }
 }
+/// Errors returned by UpdateWebhook
+#[derive(Debug, PartialEq)]
+pub enum UpdateWebhookError {
+    /// <p> Exception thrown when a request contains unexpected data. </p>
+    BadRequest(String),
+    /// <p> Exception thrown when an operation fails due to a dependent service throwing an exception. </p>
+    DependentServiceFailure(String),
+    /// <p> Exception thrown when the service fails to perform an operation due to an internal issue. </p>
+    InternalFailure(String),
+    /// <p> Exception thrown when an entity has not been found during an operation. </p>
+    NotFound(String),
+    /// <p> Exception thrown when an operation fails due to a lack of access. </p>
+    Unauthorized(String),
+}
+
+impl UpdateWebhookError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateWebhookError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(UpdateWebhookError::BadRequest(err.msg))
+                }
+                "DependentServiceFailureException" => {
+                    return RusotoError::Service(UpdateWebhookError::DependentServiceFailure(
+                        err.msg,
+                    ))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(UpdateWebhookError::InternalFailure(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(UpdateWebhookError::NotFound(err.msg))
+                }
+                "UnauthorizedException" => {
+                    return RusotoError::Service(UpdateWebhookError::Unauthorized(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateWebhookError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateWebhookError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateWebhookError::BadRequest(ref cause) => cause,
+            UpdateWebhookError::DependentServiceFailure(ref cause) => cause,
+            UpdateWebhookError::InternalFailure(ref cause) => cause,
+            UpdateWebhookError::NotFound(ref cause) => cause,
+            UpdateWebhookError::Unauthorized(ref cause) => cause,
+        }
+    }
+}
 /// Trait representing the capabilities of the Amplify API. Amplify clients implement this trait.
 pub trait Amplify {
     /// <p> Creates a new Amplify App. </p>
@@ -2082,12 +2959,24 @@ pub trait Amplify {
         input: CreateBranchRequest,
     ) -> RusotoFuture<CreateBranchResult, CreateBranchError>;
 
+    /// <p> Create a deployment for manual deploy apps. (Apps are not connected to repository) </p>
+    fn create_deployment(
+        &self,
+        input: CreateDeploymentRequest,
+    ) -> RusotoFuture<CreateDeploymentResult, CreateDeploymentError>;
+
     /// <p> Create a new DomainAssociation on an App </p>
     fn create_domain_association(
         &self,
         input: CreateDomainAssociationRequest,
     ) -> RusotoFuture<CreateDomainAssociationResult, CreateDomainAssociationError>;
 
+    /// <p> Create a new webhook on an App. </p>
+    fn create_webhook(
+        &self,
+        input: CreateWebhookRequest,
+    ) -> RusotoFuture<CreateWebhookResult, CreateWebhookError>;
+
     /// <p> Delete an existing Amplify App by appId. </p>
     fn delete_app(&self, input: DeleteAppRequest) -> RusotoFuture<DeleteAppResult, DeleteAppError>;
 
@@ -2106,6 +2995,12 @@ pub trait Amplify {
     /// <p> Delete a job, for an Amplify branch, part of Amplify App. </p>
     fn delete_job(&self, input: DeleteJobRequest) -> RusotoFuture<DeleteJobResult, DeleteJobError>;
 
+    /// <p> Deletes a webhook. </p>
+    fn delete_webhook(
+        &self,
+        input: DeleteWebhookRequest,
+    ) -> RusotoFuture<DeleteWebhookResult, DeleteWebhookError>;
+
     /// <p> Retrieves an existing Amplify App by appId. </p>
     fn get_app(&self, input: GetAppRequest) -> RusotoFuture<GetAppResult, GetAppError>;
 
@@ -2121,6 +3016,12 @@ pub trait Amplify {
     /// <p> Get a job for a branch, part of an Amplify App. </p>
     fn get_job(&self, input: GetJobRequest) -> RusotoFuture<GetJobResult, GetJobError>;
 
+    /// <p> Retrieves webhook info that corresponds to a webhookId. </p>
+    fn get_webhook(
+        &self,
+        input: GetWebhookRequest,
+    ) -> RusotoFuture<GetWebhookResult, GetWebhookError>;
+
     /// <p> Lists existing Amplify Apps. </p>
     fn list_apps(&self, input: ListAppsRequest) -> RusotoFuture<ListAppsResult, ListAppsError>;
 
@@ -2139,12 +3040,42 @@ pub trait Amplify {
     /// <p> List Jobs for a branch, part of an Amplify App. </p>
     fn list_jobs(&self, input: ListJobsRequest) -> RusotoFuture<ListJobsResult, ListJobsError>;
 
+    /// <p> List tags for resource. </p>
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError>;
+
+    /// <p> List webhooks with an app. </p>
+    fn list_webhooks(
+        &self,
+        input: ListWebhooksRequest,
+    ) -> RusotoFuture<ListWebhooksResult, ListWebhooksError>;
+
+    /// <p> Start a deployment for manual deploy apps. (Apps are not connected to repository) </p>
+    fn start_deployment(
+        &self,
+        input: StartDeploymentRequest,
+    ) -> RusotoFuture<StartDeploymentResult, StartDeploymentError>;
+
     /// <p> Starts a new job for a branch, part of an Amplify App. </p>
     fn start_job(&self, input: StartJobRequest) -> RusotoFuture<StartJobResult, StartJobError>;
 
     /// <p> Stop a job that is in progress, for an Amplify branch, part of Amplify App. </p>
     fn stop_job(&self, input: StopJobRequest) -> RusotoFuture<StopJobResult, StopJobError>;
 
+    /// <p> Tag resource with tag key and value. </p>
+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError>;
+
+    /// <p> Untag resource with resourceArn. </p>
+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError>;
+
     /// <p> Updates an existing Amplify App. </p>
     fn update_app(&self, input: UpdateAppRequest) -> RusotoFuture<UpdateAppResult, UpdateAppError>;
 
@@ -2159,6 +3090,12 @@ pub trait Amplify {
         &self,
         input: UpdateDomainAssociationRequest,
     ) -> RusotoFuture<UpdateDomainAssociationResult, UpdateDomainAssociationError>;
+
+    /// <p> Update a webhook. </p>
+    fn update_webhook(
+        &self,
+        input: UpdateWebhookRequest,
+    ) -> RusotoFuture<UpdateWebhookResult, UpdateWebhookError>;
 }
 /// A client for the Amplify API.
 #[derive(Clone)]
@@ -2172,10 +3109,7 @@ impl AmplifyClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> AmplifyClient {
-        AmplifyClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with<P, D>(
@@ -2189,10 +3123,14 @@ impl AmplifyClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        AmplifyClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AmplifyClient {
+        AmplifyClient { client, region }
     }
 }
 
@@ -2258,6 +3196,42 @@ impl Amplify for AmplifyClient {
         })
     }
 
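The constructor refactor above funnels both `new` and `new_with` through the added `new_with_client`. A minimal usage sketch of the entry points, using only the signatures visible in this diff (the regions are illustrative):

```rust
use rusoto_amplify::AmplifyClient;
use rusoto_core::{Client, Region};

fn main() {
    // Default credentials provider and TLS client.
    let _default_client = AmplifyClient::new(Region::UsEast1);

    // Reuse one rusoto_core::Client across several service clients.
    let core = Client::shared();
    let _shared_client = AmplifyClient::new_with_client(core, Region::UsWest2);
}
```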

+    /// <p> Create a deployment for manual deploy apps. (Apps are not connected to repository) </p>
+    fn create_deployment(
+        &self,
+        input: CreateDeploymentRequest,
+    ) -> RusotoFuture<CreateDeploymentResult, CreateDeploymentError> {
+        let request_uri = format!(
+            "/apps/{app_id}/branches/{branch_name}/deployments",
+            app_id = input.app_id,
+            branch_name = input.branch_name
+        );
+
+        let mut request = SignedRequest::new("POST", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateDeploymentResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateDeploymentError::from_response(response))),
+                )
+            }
+        })
+    }
+
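A usage sketch for `create_deployment`, assuming the request struct derives `Default` like the other generated request types (the app id and branch name are placeholders):

```rust
use rusoto_amplify::{Amplify, AmplifyClient, CreateDeploymentRequest};
use rusoto_core::Region;

fn main() {
    let client = AmplifyClient::new(Region::UsEast1);
    let request = CreateDeploymentRequest {
        app_id: "example-app-id".to_owned(),
        branch_name: "master".to_owned(),
        ..Default::default()
    };
    // RusotoFuture::sync() drives the futures-0.1 future to completion.
    match client.create_deployment(request).sync() {
        Ok(result) => println!("created deployment: {:?}", result),
        Err(err) => eprintln!("create_deployment failed: {}", err),
    }
}
```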

     /// <p> Create a new DomainAssociation on an App </p>
     fn create_domain_association(
         &self,
@@ -2287,6 +3261,38 @@ impl Amplify for AmplifyClient {
         })
     }
 

+    /// <p> Create a new webhook on an App. </p>
+    fn create_webhook(
+        &self,
+        input: CreateWebhookRequest,
+    ) -> RusotoFuture<CreateWebhookResult, CreateWebhookError> {
+        let request_uri = format!("/apps/{app_id}/webhooks", app_id = input.app_id);
+
+        let mut request = SignedRequest::new("POST", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateWebhookResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateWebhookError::from_response(response))),
+                )
+            }
+        })
+    }
+

     /// <p> Delete an existing Amplify App by appId. </p>
     fn delete_app(&self, input: DeleteAppRequest) -> RusotoFuture<DeleteAppResult, DeleteAppError> {
         let request_uri = format!("/apps/{app_id}", app_id = input.app_id);
@@ -2407,6 +3413,35 @@ impl Amplify for AmplifyClient {
         })
     }
 

+    /// <p> Deletes a webhook. </p>
+    fn delete_webhook(
+        &self,
+        input: DeleteWebhookRequest,
+    ) -> RusotoFuture<DeleteWebhookResult, DeleteWebhookError> {
+        let request_uri = format!("/webhooks/{webhook_id}", webhook_id = input.webhook_id);
+
+        let mut request = SignedRequest::new("DELETE", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteWebhookResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteWebhookError::from_response(response))),
+                )
+            }
+        })
+    }
+

     /// <p> Retrieves an existing Amplify App by appId. </p>
     fn get_app(&self, input: GetAppRequest) -> RusotoFuture<GetAppResult, GetAppError> {
         let request_uri = format!("/apps/{app_id}", app_id = input.app_id);
@@ -2526,6 +3561,35 @@ impl Amplify for AmplifyClient {
         })
     }
 

+    /// <p> Retrieves webhook info that corresponds to a webhookId. </p>
+    fn get_webhook(
+        &self,
+        input: GetWebhookRequest,
+    ) -> RusotoFuture<GetWebhookResult, GetWebhookError> {
+        let request_uri = format!("/webhooks/{webhook_id}", webhook_id = input.webhook_id);
+
+        let mut request = SignedRequest::new("GET", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetWebhookResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetWebhookError::from_response(response))),
+                )
+            }
+        })
+    }
+
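The `from_response` constructors above surface typed service errors through `RusotoError::Service`, which callers can match on. A sketch with `get_webhook`, assuming `GetWebhookError` carries a `NotFound` variant like the error enums shown in this diff:

```rust
use rusoto_amplify::{Amplify, AmplifyClient, GetWebhookError, GetWebhookRequest};
use rusoto_core::{Region, RusotoError};

fn main() {
    let client = AmplifyClient::new(Region::UsEast1);
    let request = GetWebhookRequest {
        webhook_id: "example-webhook-id".to_owned(),
    };
    match client.get_webhook(request).sync() {
        Ok(result) => println!("webhook: {:?}", result),
        Err(RusotoError::Service(GetWebhookError::NotFound(msg))) => {
            eprintln!("no such webhook: {}", msg);
        }
        Err(other) => eprintln!("get_webhook failed: {}", other),
    }
}
```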

     /// <p> Lists existing Amplify Apps. </p>
     fn list_apps(&self, input: ListAppsRequest) -> RusotoFuture<ListAppsResult, ListAppsError> {
         let request_uri = "/apps";
@@ -2675,6 +3739,108 @@ impl Amplify for AmplifyClient {
         })
     }
 

+    /// <p> List tags for resource. </p>
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("GET", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListTagsForResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListTagsForResourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+

+    /// <p> List webhooks with an app. </p>
+    fn list_webhooks(
+        &self,
+        input: ListWebhooksRequest,
+    ) -> RusotoFuture<ListWebhooksResult, ListWebhooksError> {
+        let request_uri = format!("/apps/{app_id}/webhooks", app_id = input.app_id);
+
+        let mut request = SignedRequest::new("GET", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("maxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("nextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListWebhooksResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListWebhooksError::from_response(response))),
+                )
+            }
+        })
+    }
+
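`list_webhooks` pages through results via the `nextToken` query parameter set above. A pagination sketch, assuming the result struct exposes `webhooks` and `next_token` fields as in the AWS Amplify API:

```rust
use rusoto_amplify::{Amplify, AmplifyClient, ListWebhooksRequest};
use rusoto_core::Region;

fn main() {
    let client = AmplifyClient::new(Region::UsEast1);
    let mut next_token = None;
    loop {
        let page = client
            .list_webhooks(ListWebhooksRequest {
                app_id: "example-app-id".to_owned(),
                max_results: Some(25),
                next_token: next_token.take(),
            })
            .sync()
            .expect("list_webhooks failed");
        for webhook in &page.webhooks {
            println!("{:?}", webhook);
        }
        match page.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
}
```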

+    /// <p> Start a deployment for manual deploy apps. (Apps are not connected to repository) </p>
+    fn start_deployment(
+        &self,
+        input: StartDeploymentRequest,
+    ) -> RusotoFuture<StartDeploymentResult, StartDeploymentError> {
+        let request_uri = format!(
+            "/apps/{app_id}/branches/{branch_name}/deployments/start",
+            app_id = input.app_id,
+            branch_name = input.branch_name
+        );
+
+        let mut request = SignedRequest::new("POST", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<StartDeploymentResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(StartDeploymentError::from_response(response))),
+                )
+            }
+        })
+    }
+
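For manual deploys, `create_deployment` hands back upload URLs and `start_deployment` then kicks off the job. A sketch of the second step, assuming `StartDeploymentRequest` accepts an optional `source_url` pointing at a hosted zip (as in the AWS Amplify API) and derives `Default`; the URL is illustrative:

```rust
use rusoto_amplify::{Amplify, AmplifyClient, StartDeploymentRequest};
use rusoto_core::Region;

fn main() {
    let client = AmplifyClient::new(Region::UsEast1);
    let started = client
        .start_deployment(StartDeploymentRequest {
            app_id: "example-app-id".to_owned(),
            branch_name: "master".to_owned(),
            // Alternatively, pass the job id returned by create_deployment
            // after uploading the artifact to its zip upload URL.
            source_url: Some("https://example.com/build.zip".to_owned()),
            ..Default::default()
        })
        .sync()
        .expect("start_deployment failed");
    println!("started: {:?}", started);
}
```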

     /// <p> Starts a new job for a branch, part of an Amplify App. </p>
     fn start_job(&self, input: StartJobRequest) -> RusotoFuture<StartJobResult, StartJobError> {
         let request_uri = format!(
@@ -2739,6 +3905,73 @@ impl Amplify for AmplifyClient {
         })
     }
 

+    /// <p> Tag resource with tag key and value. </p>
+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("POST", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
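`tag_resource` POSTs the tag map to `/tags/{resource_arn}`. A sketch, assuming `TagResourceRequest` takes the tags as a `HashMap<String, String>` (the ARN is illustrative):

```rust
use std::collections::HashMap;

use rusoto_amplify::{Amplify, AmplifyClient, TagResourceRequest};
use rusoto_core::Region;

fn main() {
    let client = AmplifyClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("team".to_owned(), "frontend".to_owned());
    client
        .tag_resource(TagResourceRequest {
            resource_arn: "arn:aws:amplify:us-east-1:123456789012:apps/example-app-id".to_owned(),
            tags,
        })
        .sync()
        .expect("tag_resource failed");
}
```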

+    /// <p> Untag resource with resourceArn. </p>
+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("DELETE", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        for item in input.tag_keys.iter() {
+            params.put("tagKeys", item);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UntagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+

     /// <p> Updates an existing Amplify App. </p>
     fn update_app(&self, input: UpdateAppRequest) -> RusotoFuture<UpdateAppResult, UpdateAppError> {
         let request_uri = format!("/apps/{app_id}", app_id = input.app_id);
@@ -2836,4 +4069,36 @@ impl Amplify for AmplifyClient {
             }
         })
     }
+

+    /// <p> Update a webhook. </p>
+    fn update_webhook(
+        &self,
+        input: UpdateWebhookRequest,
+    ) -> RusotoFuture<UpdateWebhookResult, UpdateWebhookError> {
+        let request_uri = format!("/webhooks/{webhook_id}", webhook_id = input.webhook_id);
+
+        let mut request = SignedRequest::new("POST", "amplify", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateWebhookResult, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateWebhookError::from_response(response))),
+                )
+            }
+        })
+    }
 }
diff --git a/rusoto/services/apigateway/Cargo.toml b/rusoto/services/apigateway/Cargo.toml
index 1b43f65aad0..080b7fdcf47 100644
--- a/rusoto/services/apigateway/Cargo.toml
+++ b/rusoto/services/apigateway/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_apigateway"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/apigateway/README.md b/rusoto/services/apigateway/README.md
index c92a7733bf4..3e1817110c4 100644
--- a/rusoto/services/apigateway/README.md
+++ b/rusoto/services/apigateway/README.md
@@ -23,9 +23,16 @@ To use `rusoto_apigateway` in your application, add it as a dependency in your `
 ```toml
 [dependencies]
-rusoto_apigateway = "0.40.0"
+rusoto_apigateway = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/apigateway/src/custom/mod.rs b/rusoto/services/apigateway/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/apigateway/src/custom/mod.rs
+++ b/rusoto/services/apigateway/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/apigateway/src/generated.rs b/rusoto/services/apigateway/src/generated.rs
index 40167d3c8e4..f2edde255d7 100644
--- a/rusoto/services/apigateway/src/generated.rs
+++ b/rusoto/services/apigateway/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest;
 use serde_json;
 
 /// <p>Access log settings, including the access log format and access log destination ARN.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AccessLogSettings {
     /// <p>The ARN of the CloudWatch Logs log group to receive access logs.</p>
     #[serde(rename = "destinationArn")]
@@ -41,7 +40,7 @@ pub struct AccessLogSettings {
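The widened `cfg_attr` above is what the new `serialize_structs` feature toggles: output structs gain `derive(Serialize)` outside of test builds too. A sketch of what that enables, assuming the crate is pulled in with `features = ["serialize_structs"]` and `serde_json` is available as a dependency:

```rust
use rusoto_apigateway::AccessLogSettings;

fn main() {
    let settings = AccessLogSettings {
        destination_arn: Some("arn:aws:logs:us-east-1:123456789012:log-group:api".to_owned()),
        format: Some("$context.requestId".to_owned()),
    };
    // Compiles only with the serialize_structs feature (or under #[cfg(test)]),
    // because that is when Serialize is derived.
    println!("{}", serde_json::to_string(&settings).unwrap());
}
```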

 /// <p>Represents an AWS account that is associated with API Gateway.</p> <p>To view the account info, call GET on this resource.</p> <p>Error Codes</p> <p>The following exception may be thrown when the request fails.</p> <ul> <li>UnauthorizedException</li> <li>NotFoundException</li> <li>TooManyRequestsException</li> </ul> <p>For detailed error code information, including the corresponding HTTP Status Codes, see API Gateway Error Codes</p> <p>Example: Get the information about an account.</p> <p>Request</p> <p>GET /account HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160531T184618Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}</p> <p>Response</p> <p>The successful response returns a 200 OK status code and a payload similar to the following:</p> <p>{ "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/account-apigateway-{rel}.html", "name": "account", "templated": true }, "self": { "href": "/account" }, "account:update": { "href": "/account" } }, "cloudwatchRoleArn": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "throttleSettings": { "rateLimit": 500, "burstLimit": 1000 } }</p> <p>In addition to making the REST API call directly, you can use the AWS CLI and an AWS SDK to access this resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Account {
     /// <p>The version of the API keys used for the account.</p>
     #[serde(rename = "apiKeyVersion")]
@@ -63,7 +62,7 @@ pub struct Account {

 /// <p>A resource that can be distributed to callers for executing Method resources that require an API key. API keys can be mapped to any Stage on any RestApi, which indicates that the callers with the API key can make requests to that stage.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ApiKey {
     /// <p>The timestamp when the API Key was created.</p>
     #[serde(rename = "createdDate")]
@@ -109,7 +108,7 @@ pub struct ApiKey {
 /// <p>The identifier of an ApiKey used in a UsagePlan.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ApiKeyIds {
     /// <p>A list of all the ApiKey identifiers.</p>
     #[serde(rename = "ids")]
@@ -123,7 +122,7 @@ pub struct ApiKeyIds {
 /// <p>Represents a collection of API keys as represented by an ApiKeys resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ApiKeys {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -155,9 +154,9 @@ pub struct ApiKeys {
     pub throttle: Option<::std::collections::HashMap<String, ThrottleSettings>>,
 }
 

-/// <p>Represents an authorization layer for methods. If enabled on a method, API Gateway will activate the authorizer when a client calls the method.</p>
+/// <p>Represents an authorization layer for methods. If enabled on a method, API Gateway will activate the authorizer when a client calls the method.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Authorizer {
     /// <p>Optional customer-defined field, used in OpenAPI imports and exports without functional impact.</p>
     #[serde(rename = "authType")]
@@ -183,7 +182,7 @@ pub struct Authorizer {
     #[serde(rename = "identitySource")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity_source: Option<String>,
-    /// <p>A validation expression for the incoming identity token. For TOKEN authorizers, this value is a regular expression. API Gateway will match the aud field of the incoming token from the client against the specified regular expression. It will invoke the authorizer's Lambda function when there is a match. Otherwise, it will return a 401 Unauthorized response without calling the Lambda function. The validation expression does not apply to the REQUEST authorizer.</p>
+    /// <p>A validation expression for the incoming identity token. For TOKEN authorizers, this value is a regular expression. For COGNITO_USER_POOLS authorizers, API Gateway will match the aud field of the incoming token from the client against the specified regular expression. It will invoke the authorizer's Lambda function when there is a match. Otherwise, it will return a 401 Unauthorized response without calling the Lambda function. The validation expression does not apply to the REQUEST authorizer.</p>
     #[serde(rename = "identityValidationExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity_validation_expression: Option<String>,
@@ -201,9 +200,9 @@ pub struct Authorizer {
     pub type_: Option<String>,
 }
 
-/// <p>Represents a collection of Authorizer resources.</p>
+/// <p>Represents a collection of Authorizer resources.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Authorizers {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -216,7 +215,7 @@ pub struct Authorizers {

 /// <p>Represents the base path that callers of the API must provide as part of the URL after the domain name.</p> <p>A custom domain name plus a BasePathMapping specification identifies a deployed RestApi in a given stage of the owner Account.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct BasePathMapping {
     /// <p>The base path name that callers of the API must provide as part of the URL after the domain name.</p>
     #[serde(rename = "basePath")]
@@ -234,7 +233,7 @@ pub struct BasePathMapping {
 /// <p>Represents a collection of BasePathMapping resources.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct BasePathMappings {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -268,7 +267,7 @@ pub struct CanarySettings {

 /// <p>Represents a client certificate used to configure client-side SSL authentication while sending requests to the integration endpoint.</p> <p>Client certificates are used to authenticate an API by the backend server. To authenticate an API client (or user), use IAM roles and policies, a custom Authorizer or an Amazon Cognito user pool.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ClientCertificate {
     /// <p>The identifier of the client certificate.</p>
     #[serde(rename = "clientCertificateId")]
@@ -298,7 +297,7 @@ pub struct ClientCertificate {
 /// <p>Represents a collection of ClientCertificate resources.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ClientCertificates {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -369,7 +368,7 @@ pub struct CreateAuthorizerRequest {
     #[serde(rename = "identitySource")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity_source: Option<String>,

-    /// <p>A validation expression for the incoming identity token. For TOKEN authorizers, this value is a regular expression. API Gateway will match the aud field of the incoming token from the client against the specified regular expression. It will invoke the authorizer's Lambda function when there is a match. Otherwise, it will return a 401 Unauthorized response without calling the Lambda function. The validation expression does not apply to the REQUEST authorizer.</p>
+    /// <p>A validation expression for the incoming identity token. For TOKEN authorizers, this value is a regular expression. For COGNITO_USER_POOLS authorizers, API Gateway will match the aud field of the incoming token from the client against the specified regular expression. It will invoke the authorizer's Lambda function when there is a match. Otherwise, it will return a 401 Unauthorized response without calling the Lambda function. The validation expression does not apply to the REQUEST authorizer.</p>
     #[serde(rename = "identityValidationExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity_validation_expression: Option<String>,
@@ -391,7 +390,7 @@

 /// <p>Requests API Gateway to create a new BasePathMapping resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateBasePathMappingRequest {
-    /// <p>The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify a base path name after the domain name.</p>
+    /// <p>The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Specify '(none)' if you do not want callers to specify a base path name after the domain name.</p>
     #[serde(rename = "basePath")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub base_path: Option<String>,
@@ -401,7 +400,7 @@ pub struct CreateBasePathMappingRequest {
     /// <p>[Required] The string identifier of the associated RestApi.</p>
     #[serde(rename = "restApiId")]
     pub rest_api_id: String,
-    /// <p>The name of the API's stage that you want to use for this mapping. Leave this blank if you do not want callers to explicitly specify the stage name after any base path name.</p>
+    /// <p>The name of the API's stage that you want to use for this mapping. Specify '(none)' if you do not want callers to explicitly specify the stage name after any base path name.</p>
     #[serde(rename = "stage")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub stage: Option<String>,
@@ -518,6 +517,10 @@ pub struct CreateDomainNameRequest {
     #[serde(rename = "regionalCertificateName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub regional_certificate_name: Option<String>,

+    /// <p>The Transport Layer Security (TLS) version + cipher suite for this DomainName. The valid values are TLS_1_0 and TLS_1_2.</p>
+    #[serde(rename = "securityPolicy")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub security_policy: Option<String>,
     /// <p>The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.</p>
     #[serde(rename = "tags")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -653,7 +656,7 @@ pub struct CreateStageRequest {

     /// <p>[Required] The string identifier of the associated RestApi.</p>
     #[serde(rename = "restApiId")]
     pub rest_api_id: String,
-    /// <p>[Required] The name for the Stage resource.</p>
+    /// <p>[Required] The name for the Stage resource. Stage names can only contain alphanumeric characters, hyphens, and underscores. Maximum length is 128 characters.</p>
     #[serde(rename = "stageName")]
     pub stage_name: String,
     /// <p>The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.</p>
@@ -753,7 +756,7 @@ pub struct DeleteAuthorizerRequest {

 /// <p>A request to delete the BasePathMapping resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct DeleteBasePathMappingRequest {
-    /// <p>[Required] The base path name of the BasePathMapping resource to delete.</p>
+    /// <p>[Required] The base path name of the BasePathMapping resource to delete.</p> <p>To specify an empty base path, set this parameter to '(none)'.</p>
     #[serde(rename = "basePath")]
     pub base_path: String,
     /// <p>[Required] The domain name of the BasePathMapping resource to delete.</p>
@@ -964,7 +967,7 @@ pub struct DeleteVpcLinkRequest {

 /// <p>An immutable representation of a RestApi resource that can be called by users using Stages. A deployment must be associated with a Stage for it to be callable over the Internet.</p> <p>To create a deployment, call POST on the Deployments resource of a RestApi. To view, update, or delete a deployment, call GET, PATCH, or DELETE on the specified deployment resource (/restapis/{restapi_id}/deployments/{deployment_id}).</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Deployment {
     /// <p>A summary of the RestApi at the date and time that the deployment resource was created.</p>
     #[serde(rename = "apiSummary")]
@@ -1005,7 +1008,7 @@ pub struct DeploymentCanarySettings {
 /// <p>Represents a collection resource that contains zero or more references to your existing deployments, and links that guide you on how to interact with your collection. The collection offers a paginated view of the contained deployments.</p> <p>To create a new deployment of a RestApi, make a POST request against this resource. To view, update, or delete an existing deployment, make a GET, PATCH, or DELETE request, respectively, on a specified Deployment resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Deployments {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -1018,7 +1021,7 @@ pub struct Deployments {

 /// <p>A documentation part for a targeted API entity.</p> <p>A documentation part consists of a content map (properties) and a target (location). The target specifies an API entity to which the documentation content applies. The supported API entity types are API, AUTHORIZER, MODEL, RESOURCE, METHOD, PATH_PARAMETER, QUERY_PARAMETER, REQUEST_HEADER, REQUEST_BODY, RESPONSE, RESPONSE_HEADER, and RESPONSE_BODY. Valid location fields depend on the API entity type. All valid fields are not required.</p> <p>The content map is a JSON string of API-specific key-value pairs. Although an API can use any shape for the content map, only the OpenAPI-compliant documentation fields will be injected into the associated API entity definition in the exported OpenAPI definition file.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DocumentationPart {
     /// <p>The DocumentationPart identifier, generated by API Gateway when the DocumentationPart is created.</p>
     #[serde(rename = "id")]
@@ -1036,7 +1039,7 @@ pub struct DocumentationPart {
 /// <p>A collection of the imported DocumentationPart identifiers.</p> <p>This is used to return the result when documentation parts in an external (e.g., OpenAPI) file are imported into API Gateway</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DocumentationPartIds {
     /// <p>A list of the returned documentation part identifiers.</p>
     #[serde(rename = "ids")]
@@ -1074,7 +1077,7 @@ pub struct DocumentationPartLocation {
 /// <p>The collection of documentation parts of an API.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DocumentationParts {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -1087,7 +1090,7 @@ pub struct DocumentationParts {

 /// <p>A snapshot of the documentation of an API.</p> <p>Publishing API documentation involves creating a documentation version associated with an API stage and exporting the versioned documentation to an external (e.g., OpenAPI) file.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DocumentationVersion {
     /// <p>The date when the API documentation snapshot is created.</p>
     #[serde(rename = "createdDate")]
@@ -1105,7 +1108,7 @@ pub struct DocumentationVersion {
 /// <p>The collection of documentation snapshots of an API.</p> <p>Use the DocumentationVersions to manage documentation snapshots associated with various API stages.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DocumentationVersions {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -1118,7 +1121,7 @@ pub struct DocumentationVersions {

 /// <p>Represents a custom domain name as a user-friendly host name of an API (RestApi).</p> <p>When you deploy an API, API Gateway creates a default host name for the API. This default API host name is of the {restapi-id}.execute-api.{region}.amazonaws.com format. With the default host name, you can access the API's root resource with the URL of https://{restapi-id}.execute-api.{region}.amazonaws.com/{stage}/. When you set up a custom domain name of apis.example.com for this API, you can then access the same resource using the URL of the https://apis.examples.com/myApi, where myApi is the base path mapping (BasePathMapping) of your API under the custom domain name.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainName {
     /// <p>The reference to an AWS-managed certificate that will be used by edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported source.</p>
     #[serde(rename = "certificateArn")]
@@ -1144,6 +1147,14 @@ pub struct DomainName {
     #[serde(rename = "domainName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub domain_name: Option<String>,

+    /// <p>The status of the DomainName migration. The valid values are AVAILABLE and UPDATING. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.</p>
+    #[serde(rename = "domainNameStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub domain_name_status: Option<String>,
+    /// <p>An optional text message containing detailed information about status of the DomainName migration.</p>
+    #[serde(rename = "domainNameStatusMessage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub domain_name_status_message: Option<String>,
     /// <p>The endpoint configuration of this DomainName showing the endpoint types of the domain name.</p>
     #[serde(rename = "endpointConfiguration")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1164,6 +1175,10 @@ pub struct DomainName {
     #[serde(rename = "regionalHostedZoneId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub regional_hosted_zone_id: Option<String>,

+    /// <p>The Transport Layer Security (TLS) version + cipher suite for this DomainName. The valid values are TLS_1_0 and TLS_1_2.</p>
+    #[serde(rename = "securityPolicy")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub security_policy: Option<String>,
     /// <p>The collection of tags. Each tag element is associated with a given resource.</p>
     #[serde(rename = "tags")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1172,7 +1187,7 @@ pub struct DomainName {
 /// <p>Represents a collection of DomainName resources.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainNames {
     /// <p>The current page of elements from this collection.</p>
     #[serde(rename = "items")]
@@ -1227,7 +1242,7 @@ pub struct FlushStageCacheRequest {
     ///

A gateway response of a given response type and status code, with optional response parameters and mapping templates.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get a Gateway Response of a given response type

Request

This example shows how to get a gateway response of the MISSINGAUTHENTICATIONTOKEN type.

GET /restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4request, SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 

The response type is specified as a URL path.

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ "links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html", "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" }, "gatewayresponse:delete": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" } }, "defaultResponse": false, "responseParameters": { "gatewayresponse.header.x-request-path": "method.request.path.petId", "gatewayresponse.header.Access-Control-Allow-Origin": "'a.b.c'", "gatewayresponse.header.x-request-query": "method.request.querystring.q", "gatewayresponse.header.x-request-header": "method.request.header.Accept" }, "responseTemplates": { "application/json": "{\n "message": $context.error.messageString,\n "type": "$context.error.responseType",\n "stage": "$context.stage",\n "resourcePath": "$context.resourcePath",\n "stageVariables.a": "$stageVariables.a",\n "statusCode": "'404'"\n}" }, "responseType": "MISSINGAUTHENTICATION_TOKEN", "statusCode": "404" }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GatewayResponse {
     /// <p>A Boolean flag to indicate whether this GatewayResponse is the default gateway response (true) or not (false). A default gateway response is one generated by API Gateway without any customization by an API developer.</p>
     #[serde(rename = "defaultResponse")]
@@ -1253,7 +1268,7 @@
     ///

The collection of the GatewayResponse instances of a RestApi as a responseType-to-GatewayResponse object map of key-value pairs. As such, pagination is not supported for querying this collection.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get the collection of gateway responses of an API

Request

This example request shows how to retrieve the GatewayResponses collection from an API.

GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4request, SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515 

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ "links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html", "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses" }, "first": { "href": "/restapis/o81lxisefl/gatewayresponses" }, "gatewayresponse:by-type": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "item": [ { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONFAILURE" }, { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCENOTFOUND" }, { "href": "/restapis/o81lxisefl/gatewayresponses/REQUESTTOOLARGE" }, { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTEDMEDIATYPE" }, { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERCONFIGURATIONERROR" }, { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT5XX" }, { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT4XX" }, { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTPARAMETERS" }, { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTBODY" }, { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIREDTOKEN" }, { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESSDENIED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDAPIKEY" }, { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/APICONFIGURATIONERROR" }, { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTAEXCEEDED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONTIMEOUT" }, { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDSIGNATURE" }, { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERFAILURE" } ] }, "embedded": { "item": [ { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONFAILURE" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONFAILURE" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "INTEGRATIONFAILURE", "statusCode": "504" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCENOTFOUND" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCENOTFOUND" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "RESOURCENOTFOUND", "statusCode": "404" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/REQUESTTOOLARGE" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/REQUESTTOOLARGE" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "REQUESTTOOLARGE", "statusCode": "413" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, 
"gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "THROTTLED", "statusCode": "429" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTEDMEDIATYPE" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTEDMEDIATYPE" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "UNSUPPORTEDMEDIATYPE", "statusCode": "415" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERCONFIGURATIONERROR" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERCONFIGURATIONERROR" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "AUTHORIZERCONFIGURATIONERROR", "statusCode": "500" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT5XX" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT5XX" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "DEFAULT5XX" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT4XX" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT4XX" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "DEFAULT4XX" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTPARAMETERS" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTPARAMETERS" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "BADREQUESTPARAMETERS", "statusCode": "400" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTBODY" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BADREQUESTBODY" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "BADREQUESTBODY", "statusCode": "400" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIREDTOKEN" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIREDTOKEN" } }, 
"defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "EXPIREDTOKEN", "statusCode": "403" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESSDENIED" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESSDENIED" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "ACCESSDENIED", "statusCode": "403" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDAPIKEY" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDAPIKEY" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "INVALIDAPIKEY", "statusCode": "403" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "UNAUTHORIZED", "statusCode": "401" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/APICONFIGURATIONERROR" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/APICONFIGURATIONERROR" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "APICONFIGURATIONERROR", "statusCode": "500" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTAEXCEEDED" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTAEXCEEDED" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "QUOTAEXCEEDED", "statusCode": "429" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONTIMEOUT" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATIONTIMEOUT" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "INTEGRATIONTIMEOUT", "statusCode": "504" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSINGAUTHENTICATIONTOKEN" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { 
"application/json": "{"message":$context.error.messageString}" }, "responseType": "MISSINGAUTHENTICATIONTOKEN", "statusCode": "403" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDSIGNATURE" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALIDSIGNATURE" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "INVALIDSIGNATURE", "statusCode": "403" }, { "links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERFAILURE" }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{responsetype}", "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZERFAILURE" } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates": { "application/json": "{"message":$context.error.messageString}" }, "responseType": "AUTHORIZERFAILURE", "statusCode": "500" } ] } }

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GatewayResponses { ///

Returns the entire collection, because pagination is not supported.
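Since the service hands back the whole collection in one call here, the paging fields of the request can be left at their defaults. A hedged sketch against this crate's client (the API id is a placeholder):

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, GetGatewayResponsesRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = GetGatewayResponsesRequest {
        rest_api_id: "o81lxisefl".to_owned(), // placeholder API id
        ..Default::default()                  // no paging parameters needed
    };
    match client.get_gateway_responses(request).sync() {
        Ok(collection) => println!("{:#?}", collection.items),
        Err(e) => eprintln!("error: {}", e),
    }
}
```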

#[serde(rename = "items")] @@ -1348,7 +1363,7 @@ pub struct GetAuthorizersRequest { ///

Request to describe a BasePathMapping resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetBasePathMappingRequest { - ///

[Required] The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify any base path name after the domain name.

+ ///

[Required] The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Specify '(none)' if you do not want callers to specify any base path name after the domain name.

#[serde(rename = "basePath")] pub base_path: String, ///

[Required] The domain name of the BasePathMapping resource to be described.
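To illustrate the '(none)' convention, a short sketch of describing the mapping that has no base path (the domain is a placeholder):

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, GetBasePathMappingRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = GetBasePathMappingRequest {
        // '(none)' selects the mapping with no base path segment.
        base_path: "(none)".to_owned(),
        domain_name: "api.example.com".to_owned(), // placeholder domain
    };
    println!("{:?}", client.get_base_path_mapping(request).sync());
}
```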

@@ -1824,7 +1839,7 @@ pub struct GetTagsRequest { #[serde(rename = "position")] #[serde(skip_serializing_if = "Option::is_none")] pub position: Option, - ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded. At present, Stage is the only taggable resource.

+ ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

#[serde(rename = "resourceArn")] pub resource_arn: String, } @@ -1991,7 +2006,7 @@ pub struct ImportRestApiRequest { #[serde(rename = "failOnWarnings")] #[serde(skip_serializing_if = "Option::is_none")] pub fail_on_warnings: Option, - ///

A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

To exclude DocumentationParts from the import, set parameters as ignore=documentation.

To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

To handle imported basePath, set parameters as basePath=ignore, basePath=prepend or basePath=split.

For example, the AWS CLI command to exclude documentation from the imported API is:

aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json'

The AWS CLI command to set the regional endpoint on the imported API is:

aws apigateway import-rest-api --parameters endpointConfigurationTypes=REGIONAL --body 'file:///path/to/imported-api-body.json'

+ ///

A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

To exclude DocumentationParts from the import, set parameters as ignore=documentation.

To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

To handle imported basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split.

For example, the AWS CLI command to exclude documentation from the imported API is:

aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json'

The AWS CLI command to set the regional endpoint on the imported API is:

aws apigateway import-rest-api --parameters endpointConfigurationTypes=REGIONAL --body 'file:///path/to/imported-api-body.json'
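The same parameters travel from Rust as a plain string map; a sketch assuming a local OpenAPI document (the path and ids are placeholders):

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, ImportRestApiRequest};
use rusoto_core::Region;
use std::collections::HashMap;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let mut parameters = HashMap::new();
    // Mirrors the CLI flags shown above.
    parameters.insert("ignore".to_owned(), "documentation".to_owned());
    parameters.insert(
        "endpointConfigurationTypes".to_owned(),
        "REGIONAL".to_owned(),
    );
    let request = ImportRestApiRequest {
        body: std::fs::read("/path/to/imported-api-body.json")?.into(),
        parameters: Some(parameters),
        ..Default::default()
    };
    let api = client.import_rest_api(request).sync()?;
    println!("{:?}", api.id);
    Ok(())
}
```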

#[serde(rename = "parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, @@ -1999,13 +2014,13 @@ pub struct ImportRestApiRequest { ///

Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

In the API Gateway console, the built-in Lambda integration is an AWS integration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Integration { - ///

Specifies the integration's cache key parameters.

+ ///

A list of request parameters whose values API Gateway caches. To be valid values for cacheKeyParameters, these parameters must also be specified for Method requestParameters.

#[serde(rename = "cacheKeyParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub cache_key_parameters: Option>, - ///

Specifies the integration's cache namespace.

+ ///

An API-specific tag group of related cached parameters.

#[serde(rename = "cacheNamespace")] #[serde(skip_serializing_if = "Option::is_none")] pub cache_namespace: Option, @@ -2017,7 +2032,7 @@ pub struct Integration { #[serde(rename = "connectionType")] #[serde(skip_serializing_if = "Option::is_none")] pub connection_type: Option, - ///

Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a request payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a request payload from a binary blob to a Base64-encoded string.

If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through.

+ ///

Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a request payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a request payload from a binary blob to a Base64-encoded string.

If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehavior is configured to support payload pass-through.

#[serde(rename = "contentHandling")] #[serde(skip_serializing_if = "Option::is_none")] pub content_handling: Option, @@ -2061,7 +2076,7 @@ pub struct Integration { ///

Represents an integration response. The status code must map to an existing MethodResponse, and parameters and templates can be used to transform the back-end response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IntegrationResponse { ///

Specifies how to handle response payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a response payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a response payload from a binary blob to a Base64-encoded string.

If this property is not defined, the response payload will be passed through from the integration response to the method response without modification.

#[serde(rename = "contentHandling")] @@ -2087,7 +2102,7 @@ pub struct IntegrationResponse { ///

Represents a client-facing interface by which the client calls the API to access back-end resources. A Method resource is integrated with an Integration resource. Both consist of a request and one or more responses. The method request takes the client input that is passed to the back end through the integration request. A method response returns the output from the back end to the client through an integration response. A method request is embodied in a Method resource, whereas an integration request is embodied in an Integration resource. On the other hand, a method response is represented by a MethodResponse resource, whereas an integration response is represented by an IntegrationResponse resource.

Example: Retrieve the GET method on a specified resource

Request

The following example request retrieves the information about the GET method on an API resource (3kzxbg5sa2) of an API (fugvjdxtri).

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T210259Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ "links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", "name": "integrationresponse", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html", "name": "method", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", "name": "methodresponse", "templated": true } ], "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET", "name": "GET", "title": "GET" }, "integration:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" }, "method:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" }, "method:integration": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" }, "method:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", "name": "200", "title": "200" }, "method:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" }, "methodresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{statuscode}", "templated": true } }, "apiKeyRequired": true, "authorizationType": "NONE", "httpMethod": "GET", "embedded": { "method:integration": { "links": { "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" }, "integration:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" }, "integration:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" }, "integrationresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{statuscode}", "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "3kzxbg5sa2", "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": "POST", "passthroughBehavior": "WHENNOMATCH", "requestParameters": { "integration.request.header.Content-Type": "'application/x-amz-json-1.1'" }, "requestTemplates": { "application/json": "{\n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-east-1:kinesis:action/ListStreams", "embedded": { "integration:responses": { "links": { "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", "name": "200", "title": "200" }, "integrationresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'" }, "responseTemplates": { "application/json": "$util.urlDecode("%3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E")" }, "statusCode": "200" } } }, "method:responses": { "links": { "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", "name": "200", "title": "200" }, "methodresponse:delete": { "href": 
"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" } }, "responseModels": { "application/json": "Empty" }, "responseParameters": { "method.response.header.Content-Type": false }, "statusCode": "200" } } }

In the example above, the response template for the 200 OK response maps the JSON output from the ListStreams action in the back end to an XML output. The mapping template is URL-encoded as %3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E and the output is decoded using the $util.urlDecode() helper function.
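A sketch of issuing the same lookup through this crate instead of raw HTTP, reusing the ids from the example above:

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, GetMethodRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = GetMethodRequest {
        rest_api_id: "fugvjdxtri".to_owned(),
        resource_id: "3kzxbg5sa2".to_owned(),
        http_method: "GET".to_owned(),
    };
    match client.get_method(request).sync() {
        // The embedded integration corresponds to the embedded block above.
        Ok(method) => println!("{:#?}", method.method_integration),
        Err(e) => eprintln!("error: {}", e),
    }
}
```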

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Method { ///

A boolean flag specifying whether a valid ApiKey is required to invoke this method.

#[serde(rename = "apiKeyRequired")] @@ -2117,7 +2132,7 @@ pub struct Method { #[serde(rename = "methodResponses")] #[serde(skip_serializing_if = "Option::is_none")] pub method_responses: Option<::std::collections::HashMap>, - ///

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

+ ///

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in the PetStore example.

#[serde(rename = "operationName")] #[serde(skip_serializing_if = "Option::is_none")] pub operation_name: Option, @@ -2137,7 +2152,7 @@ pub struct Method { ///

Represents a method response of a given HTTP status code returned to the client. The method response is passed from the back end through the associated integration response that can be transformed using a mapping template.

Example: A MethodResponse instance of an API

Request

The example request retrieves a MethodResponse of the 200 status code.

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T222952Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns 200 OK status and a payload as follows:

{ "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", "name": "methodresponse", "templated": true }, "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" } }, "responseModels": { "application/json": "Empty" }, "responseParameters": { "method.response.header.Content-Type": false }, "statusCode": "200" }

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MethodResponse { ///

Specifies the Model resources used for the response's content-type. Response models are represented as a key/value map, with a content-type as the key and a Model name as the value.

#[serde(rename = "responseModels")] @@ -2155,7 +2170,7 @@ pub struct MethodResponse { ///

Specifies the method setting properties.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MethodSetting { ///

Specifies whether the cached responses are encrypted. The PATCH path for this setting is /{method_setting_key}/caching/dataEncrypted, and the value is a Boolean.

#[serde(rename = "cacheDataEncrypted")] @@ -2201,7 +2216,7 @@ pub struct MethodSetting { ///

Represents a summary of a Method resource, given a particular date and time.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MethodSnapshot { ///

Specifies whether the method requires a valid ApiKey.

#[serde(rename = "apiKeyRequired")] @@ -2215,7 +2230,7 @@ pub struct MethodSnapshot { ///

Represents the data structure of a method's request or response payload.

A request model defines the data structure of the client-supplied request payload. A response model defines the data structure of the response payload returned by the back end. Although not required, models are useful for mapping payloads between the front end and back end.

A model is used for generating an API's SDK, validating the input request body, and creating a skeletal mapping template.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Model { ///

The content-type for the model.

#[serde(rename = "contentType")] @@ -2241,7 +2256,7 @@ pub struct Model { ///

Represents a collection of Model resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Models { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -2299,11 +2314,11 @@ pub struct PutGatewayResponseRequest { ///

Sets up a method's integration.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutIntegrationRequest { - ///

Specifies a put integration input's cache key parameters.

+ ///

A list of request parameters whose values are to be cached.

#[serde(rename = "cacheKeyParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub cache_key_parameters: Option>, - ///

Specifies a put integration input's cache namespace.

+ ///

An API-specific tag group of related cached parameters.

#[serde(rename = "cacheNamespace")] #[serde(skip_serializing_if = "Option::is_none")] pub cache_namespace: Option, @@ -2315,7 +2330,7 @@ pub struct PutIntegrationRequest { #[serde(rename = "connectionType")] #[serde(skip_serializing_if = "Option::is_none")] pub connection_type: Option, - ///

Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a request payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a request payload from a binary blob to a Base64-encoded string.

If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through.

+ ///

Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a request payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a request payload from a binary blob to a Base64-encoded string.

If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehavior is configured to support payload pass-through.
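For instance, a caller might ask for CONVERT_TO_TEXT when wiring up an integration; a sketch with placeholder ids, leaving the rest of the required wiring to Default:

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, PutIntegrationRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = PutIntegrationRequest {
        rest_api_id: "fugvjdxtri".to_owned(), // placeholder ids
        resource_id: "3kzxbg5sa2".to_owned(),
        http_method: "POST".to_owned(),
        type_: "AWS".to_owned(),
        integration_http_method: Some("POST".to_owned()),
        // Hand the back end a Base64-encoded string instead of the raw
        // binary request payload.
        content_handling: Some("CONVERT_TO_TEXT".to_owned()),
        ..Default::default()
    };
    println!("{:?}", client.put_integration(request).sync());
}
```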

#[serde(rename = "contentHandling")] #[serde(skip_serializing_if = "Option::is_none")] pub content_handling: Option, @@ -2415,7 +2430,7 @@ pub struct PutMethodRequest { ///

[Required] Specifies the method request's HTTP method type.

#[serde(rename = "httpMethod")] pub http_method: String, - ///

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

+ ///

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in the PetStore example.

#[serde(rename = "operationName")] #[serde(skip_serializing_if = "Option::is_none")] pub operation_name: Option, @@ -2511,7 +2526,7 @@ pub struct QuotaSettings { ///

A set of validation rules for incoming Method requests.

In OpenAPI, a RequestValidator of an API is defined by the x-amazon-apigateway-request-validators.requestValidator object. It is referenced using the x-amazon-apigateway-request-validator property.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RequestValidator { ///

The identifier of this RequestValidator.

#[serde(rename = "id")] @@ -2533,7 +2548,7 @@ pub struct RequestValidator { ///

A collection of RequestValidator resources of a given RestApi.

In OpenAPI, the RequestValidators of an API are defined by the x-amazon-apigateway-request-validators extension.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RequestValidators { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -2546,7 +2561,7 @@ pub struct RequestValidators { ///

Represents an API resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///

The resource's identifier.

#[serde(rename = "id")] @@ -2572,7 +2587,7 @@ pub struct Resource { ///

Represents a collection of Resource resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resources { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -2585,7 +2600,7 @@ pub struct Resources { ///

Represents a REST API.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestApi { ///

The source of the API key for metering requests according to a usage plan. Valid values are:

  • HEADER to read the API key from the X-API-Key header of a request.
  • AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

#[serde(rename = "apiKeySource")] @@ -2639,7 +2654,7 @@ pub struct RestApi { ///

Contains references to your APIs and links that guide you in how to interact with your collection. A collection offers a paginated view of your APIs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestApis { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -2652,7 +2667,7 @@ pub struct RestApis { ///

A configuration property of an SDK type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SdkConfigurationProperty { ///

The default value of an SdkType configuration property.

#[serde(rename = "defaultValue")] @@ -2689,7 +2704,7 @@ pub struct SdkResponse { ///

A type of SDK that API Gateway can generate.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SdkType { ///

A list of configuration properties of an SdkType.

#[serde(rename = "configurationProperties")] @@ -2711,7 +2726,7 @@ pub struct SdkType { ///

The collection of SdkType instances.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SdkTypes { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -2724,7 +2739,7 @@ pub struct SdkTypes { ///

Represents a unique identifier for a version of a deployed RestApi that is callable by users.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Stage { ///

Settings for logging access in this stage.

#[serde(rename = "accessLogSettings")] @@ -2774,7 +2789,7 @@ pub struct Stage { #[serde(rename = "methodSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub method_settings: Option<::std::collections::HashMap>, - ///

The name of the stage is the first path segment in the Uniform Resource Identifier (URI) of a call to API Gateway.

+ ///

The name of the stage is the first path segment in the Uniform Resource Identifier (URI) of a call to API Gateway. Stage names can only contain alphanumeric characters, hyphens, and underscores. Maximum length is 128 characters.

#[serde(rename = "stageName")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_name: Option, @@ -2811,7 +2826,7 @@ pub struct StageKey { ///

A list of Stage resources that are associated with the ApiKey resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Stages { ///

The current page of elements from this collection.

#[serde(rename = "item")] @@ -2822,7 +2837,7 @@ pub struct Stages { ///

Adds or updates a tag on a given resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TagResourceRequest { - ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded. At present, Stage is the only taggable resource.

+ ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

#[serde(rename = "resourceArn")] pub resource_arn: String, ///

[Required] The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

@@ -2832,7 +2847,7 @@ pub struct TagResourceRequest { ///

The collection of tags. Each tag element is associated with a given resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Tags { ///

The collection of tags. Each tag element is associated with a given resource.

#[serde(rename = "tags")] @@ -2842,7 +2857,7 @@ pub struct Tags { ///

Represents a mapping template used to transform a payload.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Template { ///

The Apache Velocity Template Language (VTL) template content used for the template resource.

#[serde(rename = "value")] @@ -2887,7 +2902,7 @@ pub struct TestInvokeAuthorizerRequest { ///

Represents the response of the test invoke request for a custom Authorizer

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestInvokeAuthorizerResponse { #[serde(rename = "authorization")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2958,7 +2973,7 @@ pub struct TestInvokeMethodRequest { ///

Represents the response of the test invoke request in the HTTP method.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestInvokeMethodResponse { ///

The body of the HTTP response.

#[serde(rename = "body")] @@ -3002,7 +3017,7 @@ pub struct ThrottleSettings { ///

Removes a tag from a given resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UntagResourceRequest { - ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded. At present, Stage is the only taggable resource.

+ ///

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

#[serde(rename = "resourceArn")] pub resource_arn: String, ///

[Required] The Tag keys to delete.

@@ -3049,7 +3064,7 @@ pub struct UpdateAuthorizerRequest { ///

A request to change information about the BasePathMapping resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateBasePathMappingRequest { - ///

[Required] The base path of the BasePathMapping resource to change.

+ ///

[Required] The base path of the BasePathMapping resource to change.

To specify an empty base path, set this parameter to '(none)'.

#[serde(rename = "basePath")] pub base_path: String, ///

[Required] The domain name of the BasePathMapping resource to change.

@@ -3336,7 +3351,7 @@ pub struct UpdateVpcLinkRequest { ///

Represents the usage data of a usage plan.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Usage { ///

The ending date of the usage data.

#[serde(rename = "endDate")] @@ -3361,7 +3376,7 @@ pub struct Usage { ///

Represents a usage plan that can specify who can access associated API stages with specified request limits and quotas.

In a usage plan, you associate an API by specifying the API's Id and a stage name of the specified API. You add plan customers by adding API keys to the plan.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsagePlan { ///

The associated API stages of a usage plan.

#[serde(rename = "apiStages")] @@ -3399,7 +3414,7 @@ pub struct UsagePlan { ///

Represents a usage plan key to identify a plan customer.

To associate an API stage with a selected API key in a usage plan, you must create a UsagePlanKey resource to represent the selected ApiKey.
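A sketch of creating that UsagePlanKey for an existing API key (both ids are placeholders):

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, CreateUsagePlanKeyRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = CreateUsagePlanKeyRequest {
        usage_plan_id: "usageplan1".to_owned(), // placeholder ids
        key_id: "apikey123".to_owned(),
        key_type: "API_KEY".to_owned(),
    };
    println!("{:?}", client.create_usage_plan_key(request).sync());
}
```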

"

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsagePlanKey { ///

The Id of a usage plan key.

#[serde(rename = "id")] @@ -3421,7 +3436,7 @@ pub struct UsagePlanKey { ///

Represents the collection of usage plan keys added to usage plans for the associated API keys and, possibly, other types of keys.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsagePlanKeys { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -3434,7 +3449,7 @@ pub struct UsagePlanKeys { ///

Represents a collection of usage plans for an AWS account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsagePlans { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -3447,7 +3462,7 @@ pub struct UsagePlans { ///

An API Gateway VPC link for a RestApi to access resources in an Amazon Virtual Private Cloud (VPC).

To enable access to a resource in an Amazon Virtual Private Cloud through Amazon API Gateway, you, as an API developer, create a VpcLink resource targeted for one or more network load balancers of the VPC and then integrate an API method with a private integration that uses the VpcLink. The private integration has an integration type of HTTP or HTTP_PROXY and has a connection type of VPC_LINK. The integration uses the connectionId property to identify the VpcLink used.
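Putting those pieces together, a private integration referencing an existing VpcLink might be set up as in the sketch below, where every id and the NLB URI are placeholders:

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, PutIntegrationRequest};
use rusoto_core::Region;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let request = PutIntegrationRequest {
        rest_api_id: "fugvjdxtri".to_owned(), // placeholder ids throughout
        resource_id: "3kzxbg5sa2".to_owned(),
        http_method: "GET".to_owned(),
        type_: "HTTP_PROXY".to_owned(),
        integration_http_method: Some("GET".to_owned()),
        connection_type: Some("VPC_LINK".to_owned()),
        connection_id: Some("vpclink1".to_owned()),
        uri: Some("http://internal-nlb.example.com/".to_owned()),
        ..Default::default()
    };
    println!("{:?}", client.put_integration(request).sync());
}
```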

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VpcLink { ///

The description of the VPC link.

#[serde(rename = "description")] @@ -3481,7 +3496,7 @@ pub struct VpcLink { ///

The collection of VPC links under the caller's account in a region.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VpcLinks { ///

The current page of elements from this collection.

#[serde(rename = "items")] @@ -10441,7 +10456,7 @@ pub trait ApiGateway { ///

Adds or updates a tag on a given resource.

fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError>; - ///

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.

+ ///

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.
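A sketch of driving that simulation, assuming a REST API with a header-based custom authorizer (the ids and header value are placeholders):

```rust
use rusoto_apigateway::{ApiGateway, ApiGatewayClient, TestInvokeAuthorizerRequest};
use rusoto_core::Region;
use std::collections::HashMap;

fn main() {
    let client = ApiGatewayClient::new(Region::UsEast1);
    let mut headers = HashMap::new();
    headers.insert("Authorization".to_owned(), "allow".to_owned());
    let request = TestInvokeAuthorizerRequest {
        rest_api_id: "fugvjdxtri".to_owned(), // placeholder ids
        authorizer_id: "abc123".to_owned(),
        headers: Some(headers),
        ..Default::default()
    };
    println!("{:?}", client.test_invoke_authorizer(request).sync());
}
```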

fn test_invoke_authorizer( &self, input: TestInvokeAuthorizerRequest, @@ -10584,10 +10599,7 @@ impl ApiGatewayClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ApiGatewayClient { - ApiGatewayClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -10601,10 +10613,14 @@ impl ApiGatewayClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ApiGatewayClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ApiGatewayClient { + ApiGatewayClient { client, region } } } @@ -13822,7 +13838,7 @@ impl ApiGateway for ApiGatewayClient { }) } - ///

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.

+ ///

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.

fn test_invoke_authorizer( &self, input: TestInvokeAuthorizerRequest, diff --git a/rusoto/services/apigatewaymanagementapi/Cargo.toml b/rusoto/services/apigatewaymanagementapi/Cargo.toml index 9c973fe55b2..a69e5141fab 100644 --- a/rusoto/services/apigatewaymanagementapi/Cargo.toml +++ b/rusoto/services/apigatewaymanagementapi/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_apigatewaymanagementapi" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -21,14 +21,16 @@ serde = "1.0.2" serde_derive = "1.0.2" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/apigatewaymanagementapi/README.md b/rusoto/services/apigatewaymanagementapi/README.md index 470852a281e..f9c857bc15f 100644 --- a/rusoto/services/apigatewaymanagementapi/README.md +++ b/rusoto/services/apigatewaymanagementapi/README.md @@ -23,9 +23,16 @@ To use `rusoto_apigatewaymanagementapi` in your application, add it as a depende ```toml [dependencies] -rusoto_apigatewaymanagementapi = "0.40.0" +rusoto_apigatewaymanagementapi = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/apigatewaymanagementapi/src/custom/mod.rs b/rusoto/services/apigatewaymanagementapi/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/apigatewaymanagementapi/src/custom/mod.rs +++ b/rusoto/services/apigatewaymanagementapi/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/apigatewaymanagementapi/src/generated.rs b/rusoto/services/apigatewaymanagementapi/src/generated.rs index 7bcf2904bed..f0ba7ff2155 100644 --- a/rusoto/services/apigatewaymanagementapi/src/generated.rs +++ b/rusoto/services/apigatewaymanagementapi/src/generated.rs @@ -9,20 +9,58 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteConnectionRequest { + #[serde(rename = "ConnectionId")] + pub connection_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetConnectionRequest { + #[serde(rename = "ConnectionId")] + pub connection_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetConnectionResponse { + ///

The time in ISO 8601 format for when the connection was established.

+ #[serde(rename = "ConnectedAt")] + #[serde(skip_serializing_if = "Option::is_none")] + pub connected_at: Option, + #[serde(rename = "Identity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity: Option, + ///

The time in ISO 8601 format for when the connection was last active.

+ #[serde(rename = "LastActiveAt")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_active_at: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Identity { + ///

The source IP address of the TCP connection making the request to API Gateway.

+ #[serde(rename = "SourceIp")] + pub source_ip: String, + ///

The User Agent of the API caller.

+ #[serde(rename = "UserAgent")] + pub user_agent: String, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PostToConnectionRequest { ///

The identifier of the connection that a specific client is using.

@@ -38,6 +76,94 @@ pub struct PostToConnectionRequest { pub data: bytes::Bytes, } +/// Errors returned by DeleteConnection +#[derive(Debug, PartialEq)] +pub enum DeleteConnectionError { + ///

The caller is not authorized to invoke this operation.

+ Forbidden(String), + ///

The connection with the provided id no longer exists.

+ Gone(String), + ///

The client is sending more than the allowed number of requests per unit of time or the WebSocket client-side buffer is full.

+ LimitExceeded(String), +} + +impl DeleteConnectionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "ForbiddenException" => { + return RusotoError::Service(DeleteConnectionError::Forbidden(err.msg)) + } + "GoneException" => { + return RusotoError::Service(DeleteConnectionError::Gone(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(DeleteConnectionError::LimitExceeded(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteConnectionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteConnectionError { + fn description(&self) -> &str { + match *self { + DeleteConnectionError::Forbidden(ref cause) => cause, + DeleteConnectionError::Gone(ref cause) => cause, + DeleteConnectionError::LimitExceeded(ref cause) => cause, + } + } +} +/// Errors returned by GetConnection +#[derive(Debug, PartialEq)] +pub enum GetConnectionError { + ///

The caller is not authorized to invoke this operation.

+ Forbidden(String), + ///

The connection with the provided id no longer exists.

+ Gone(String), + ///

The client is sending more than the allowed number of requests per unit of time or the WebSocket client-side buffer is full.

+ LimitExceeded(String), +} + +impl GetConnectionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "ForbiddenException" => { + return RusotoError::Service(GetConnectionError::Forbidden(err.msg)) + } + "GoneException" => return RusotoError::Service(GetConnectionError::Gone(err.msg)), + "LimitExceededException" => { + return RusotoError::Service(GetConnectionError::LimitExceeded(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetConnectionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetConnectionError { + fn description(&self) -> &str { + match *self { + GetConnectionError::Forbidden(ref cause) => cause, + GetConnectionError::Gone(ref cause) => cause, + GetConnectionError::LimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by PostToConnection #[derive(Debug, PartialEq)] pub enum PostToConnectionError { @@ -45,7 +171,7 @@ pub enum PostToConnectionError { Forbidden(String), ///

The connection with the provided id no longer exists.

Gone(String), - ///

The client is sending more than the allowed number of requests per unit of time.

+ ///

The client is sending more than the allowed number of requests per unit of time or the WebSocket client-side buffer is full.

LimitExceeded(String), ///

The data has exceeded the maximum size allowed.

PayloadTooLarge(String), @@ -91,6 +217,18 @@ impl Error for PostToConnectionError { } /// Trait representing the capabilities of the AmazonApiGatewayManagementApi API. AmazonApiGatewayManagementApi clients implement this trait. pub trait ApiGatewayManagementApi { + ///

Delete the connection with the provided id.

+ fn delete_connection( + &self, + input: DeleteConnectionRequest, + ) -> RusotoFuture<(), DeleteConnectionError>; + + ///

Get information about the connection with the provided id.

+ fn get_connection( + &self, + input: GetConnectionRequest, + ) -> RusotoFuture; + ///

Sends the provided data to the specified connection.
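Because this API is served from the deployed WebSocket stage itself, the client is usually pointed at a custom endpoint; a sketch with a placeholder endpoint and connection id, handling the Gone case described above:

```rust
use rusoto_apigatewaymanagementapi::{
    ApiGatewayManagementApi, ApiGatewayManagementApiClient, PostToConnectionError,
    PostToConnectionRequest,
};
use rusoto_core::{Region, RusotoError};

fn main() {
    let client = ApiGatewayManagementApiClient::new(Region::Custom {
        name: "us-east-1".to_owned(),
        endpoint: "https://abc123.execute-api.us-east-1.amazonaws.com/production".to_owned(),
    });
    let request = PostToConnectionRequest {
        connection_id: "L0SM9cOFvHcCIhw=".to_owned(), // placeholder
        data: b"hello".to_vec().into(),
    };
    match client.post_to_connection(request).sync() {
        Ok(()) => println!("sent"),
        // The peer has already disconnected; drop the stored connection id.
        Err(RusotoError::Service(PostToConnectionError::Gone(_))) => {}
        Err(e) => eprintln!("error: {}", e),
    }
}
```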

fn post_to_connection( &self, @@ -109,10 +247,7 @@ impl ApiGatewayManagementApiClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ApiGatewayManagementApiClient { - ApiGatewayManagementApiClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -126,14 +261,84 @@ impl ApiGatewayManagementApiClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ApiGatewayManagementApiClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client( + client: Client, + region: region::Region, + ) -> ApiGatewayManagementApiClient { + ApiGatewayManagementApiClient { client, region } } } impl ApiGatewayManagementApi for ApiGatewayManagementApiClient { + ///

Delete the connection with the provided id.

+ fn delete_connection( + &self, + input: DeleteConnectionRequest, + ) -> RusotoFuture<(), DeleteConnectionError> { + let request_uri = format!( + "/@connections/{connection_id}", + connection_id = input.connection_id + ); + + let mut request = SignedRequest::new("DELETE", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 204 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = ::std::mem::drop(response); + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(DeleteConnectionError::from_response(response))), + ) + } + }) + } + + ///

Get information about the connection with the provided id.

+ fn get_connection( + &self, + input: GetConnectionRequest, + ) -> RusotoFuture { + let request_uri = format!( + "/@connections/{connection_id}", + connection_id = input.connection_id + ); + + let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetConnectionError::from_response(response))), + ) + } + }) + } + ///

Sends the provided data to the specified connection.

fn post_to_connection( &self, diff --git a/rusoto/services/apigatewayv2/Cargo.toml b/rusoto/services/apigatewayv2/Cargo.toml index 60f496d0c37..cbcadca9b30 100644 --- a/rusoto/services/apigatewayv2/Cargo.toml +++ b/rusoto/services/apigatewayv2/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_apigatewayv2" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/apigatewayv2/README.md b/rusoto/services/apigatewayv2/README.md index 9b8b2c11a88..4339fe4f0e9 100644 --- a/rusoto/services/apigatewayv2/README.md +++ b/rusoto/services/apigatewayv2/README.md @@ -23,9 +23,16 @@ To use `rusoto_apigatewayv2` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_apigatewayv2 = "0.40.0" +rusoto_apigatewayv2 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/apigatewayv2/src/custom/mod.rs b/rusoto/services/apigatewayv2/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/apigatewayv2/src/custom/mod.rs +++ b/rusoto/services/apigatewayv2/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/apigatewayv2/src/generated.rs b/rusoto/services/apigatewayv2/src/generated.rs index f7545adebe9..7611d12a746 100644 --- a/rusoto/services/apigatewayv2/src/generated.rs +++ b/rusoto/services/apigatewayv2/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -41,7 +40,7 @@ pub struct AccessLogSettings { ///

Represents an API.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Api { ///

The URI of the API, of the form {api-id}.execute-api.{region}.amazonaws.com. The /// stage name is typically appended to this URI to form a complete path to a deployed @@ -78,6 +77,10 @@ pub struct Api { ///

The route selection expression for the API.

#[serde(rename = "RouteSelectionExpression")] pub route_selection_expression: String, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

A version identifier for the API.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -91,7 +94,7 @@ pub struct Api { ///

Represents an API mapping.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApiMapping { ///

The API identifier.

#[serde(rename = "ApiId")] @@ -111,7 +114,7 @@ pub struct ApiMapping { ///

Represents an authorizer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Authorizer { ///

Specifies the required credentials as an IAM role for API Gateway to invoke the /// authorizer. To specify an IAM role for API Gateway to assume, use the role's Amazon @@ -201,7 +204,7 @@ pub struct CreateApiMappingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApiMappingResponse { ///

The API identifier.

#[serde(rename = "ApiId")] @@ -244,6 +247,10 @@ pub struct CreateApiRequest { ///

The route selection expression for the API.

#[serde(rename = "RouteSelectionExpression")] pub route_selection_expression: String, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.
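The new Tags field can be filled in at creation time; a sketch for a WebSocket API where the names and tag values are placeholders:

```rust
use rusoto_apigatewayv2::{ApiGatewayV2, ApiGatewayV2Client, CreateApiRequest};
use rusoto_core::Region;
use std::collections::HashMap;

fn main() {
    let client = ApiGatewayV2Client::new(Region::UsEast1);
    let mut tags = HashMap::new();
    // Keys starting with "aws:" are reserved by the service.
    tags.insert("team".to_owned(), "platform".to_owned());
    let request = CreateApiRequest {
        name: "my-websocket-api".to_owned(),
        protocol_type: "WEBSOCKET".to_owned(),
        route_selection_expression: "$request.body.action".to_owned(),
        tags: Some(tags),
        ..Default::default()
    };
    println!("{:?}", client.create_api(request).sync());
}
```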

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

A version identifier for the API.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -251,7 +258,7 @@ pub struct CreateApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApiResponse { ///

The URI of the API, of the form {api-id}.execute-api.{region}.amazonaws.com. The /// stage name is typically appended to this URI to form a complete path to a deployed @@ -291,6 +298,10 @@ pub struct CreateApiResponse { #[serde(rename = "RouteSelectionExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub route_selection_expression: Option, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

A version identifier for the API.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -371,7 +382,7 @@ pub struct CreateAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAuthorizerResponse { ///

Specifies the required credentials as an IAM role for API Gateway to invoke the /// authorizer. To specify an IAM role for API Gateway to assume, use the role's Amazon @@ -461,7 +472,7 @@ pub struct CreateDeploymentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDeploymentResponse { ///

The date and time when the Deployment resource was created.

#[serde(rename = "CreatedDate")] @@ -495,10 +506,14 @@ pub struct CreateDomainNameRequest { #[serde(rename = "DomainNameConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub domain_name_configurations: Option>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDomainNameResponse { ///

The API mapping selection expression.

#[serde(rename = "ApiMappingSelectionExpression")] @@ -512,6 +527,10 @@ pub struct CreateDomainNameResponse { #[serde(rename = "DomainNameConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub domain_name_configurations: Option>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -630,7 +649,7 @@ pub struct CreateIntegrationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIntegrationResponse { ///

The connection ID.

#[serde(rename = "ConnectionId")] @@ -801,7 +820,7 @@ pub struct CreateIntegrationResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIntegrationResponseResponse { ///

Specifies how to handle response payload content type conversions. Supported /// values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the @@ -870,7 +889,7 @@ pub struct CreateModelRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateModelResponse { ///

The content-type for the model, for example, "application/json".

#[serde(rename = "ContentType")] @@ -953,7 +972,7 @@ pub struct CreateRouteRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRouteResponse { ///

Specifies whether an API key is required for this route.

#[serde(rename = "ApiKeyRequired")] @@ -1043,7 +1062,7 @@ pub struct CreateRouteResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRouteResponseResponse { ///

Represents the model selection expression of a route response.

#[serde(rename = "ModelSelectionExpression")] @@ -1105,10 +1124,14 @@ pub struct CreateStageRequest { #[serde(rename = "StageVariables")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_variables: Option<::std::collections::HashMap>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStageResponse { ///

Settings for logging access in this stage.

#[serde(rename = "AccessLogSettings")] @@ -1153,6 +1176,10 @@ pub struct CreateStageResponse { #[serde(rename = "StageVariables")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_variables: Option<::std::collections::HashMap>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1269,7 +1296,7 @@ pub struct DeleteStageRequest { /// Deployment must be associated with a Stage for it to be /// callable over the internet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Deployment { ///

The date and time when the Deployment resource was created.

#[serde(rename = "CreatedDate")] @@ -1296,7 +1323,7 @@ pub struct Deployment { ///

Represents a domain name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainName { ///

The API mapping selection expression.

#[serde(rename = "ApiMappingSelectionExpression")] @@ -1309,6 +1336,10 @@ pub struct DomainName { #[serde(rename = "DomainNameConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub domain_name_configurations: Option>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } ///

The domain name configuration.

@@ -1333,6 +1364,14 @@ pub struct DomainNameConfiguration { #[serde(rename = "CertificateUploadDate")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_upload_date: Option, + ///

The status of the domain name migration. The valid values are AVAILABLE and UPDATING. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.

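As a hedged sketch, callers might gate further updates on this status; the field names come from the struct fields added below, and the string literal mirrors the documented value:

```rust
// `config` is a DomainNameConfiguration returned by the service.
if let Some(status) = &config.domain_name_status {
    if status == "UPDATING" {
        // The domain can't be modified until the in-flight operation completes.
        println!("update in progress: {:?}", config.domain_name_status_message);
    }
}
```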
+ #[serde(rename = "DomainNameStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub domain_name_status: Option, + ///

An optional text message containing detailed information about status of the domain name migration.

+ #[serde(rename = "DomainNameStatusMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub domain_name_status_message: Option, ///

The endpoint type.

#[serde(rename = "EndpointType")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1341,6 +1380,10 @@ pub struct DomainNameConfiguration { #[serde(rename = "HostedZoneId")] #[serde(skip_serializing_if = "Option::is_none")] pub hosted_zone_id: Option, + ///

The Transport Layer Security (TLS) version of the security policy for this domain name. The valid values are TLS_1_0 and TLS_1_2.

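A small sketch of requesting the newer policy when building a configuration, assuming `Default` is derived as on the other structs here:

```rust
let mut config = DomainNameConfiguration::default();
// Request TLS 1.2 as the minimum version; "TLS_1_0" is the other documented value.
config.security_policy = Some("TLS_1_2".to_string());
```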
+ #[serde(rename = "SecurityPolicy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub security_policy: Option, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1354,7 +1397,7 @@ pub struct GetApiMappingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApiMappingResponse { ///

The API identifier.

#[serde(rename = "ApiId")] @@ -1391,7 +1434,7 @@ pub struct GetApiMappingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApiMappingsResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1412,7 +1455,7 @@ pub struct GetApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApiResponse { ///

The URI of the API, of the form {api-id}.execute-api.{region}.amazonaws.com. The /// stage name is typically appended to this URI to form a complete path to a deployed @@ -1452,6 +1495,10 @@ pub struct GetApiResponse { #[serde(rename = "RouteSelectionExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub route_selection_expression: Option, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

A version identifier for the API.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1477,7 +1524,7 @@ pub struct GetApisRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApisResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1501,7 +1548,7 @@ pub struct GetAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAuthorizerResponse { ///

Specifies the required credentials as an IAM role for API Gateway to invoke the /// authorizer. To specify an IAM role for API Gateway to assume, use the role's Amazon @@ -1591,7 +1638,7 @@ pub struct GetAuthorizersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAuthorizersResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1615,7 +1662,7 @@ pub struct GetDeploymentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentResponse { ///

The date and time when the Deployment resource was created.

#[serde(rename = "CreatedDate")] @@ -1657,7 +1704,7 @@ pub struct GetDeploymentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentsResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1678,7 +1725,7 @@ pub struct GetDomainNameRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDomainNameResponse { ///

The API mapping selection expression.

#[serde(rename = "ApiMappingSelectionExpression")] @@ -1692,6 +1739,10 @@ pub struct GetDomainNameResponse { #[serde(rename = "DomainNameConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub domain_name_configurations: Option>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1708,7 +1759,7 @@ pub struct GetDomainNamesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDomainNamesResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1732,7 +1783,7 @@ pub struct GetIntegrationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntegrationResponse { ///

The connection ID.

#[serde(rename = "ConnectionId")] @@ -1867,7 +1918,7 @@ pub struct GetIntegrationResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntegrationResponseResponse { ///

Specifies how to handle response payload content type conversions. Supported /// values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the @@ -1933,7 +1984,7 @@ pub struct GetIntegrationResponsesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntegrationResponsesResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1963,7 +2014,7 @@ pub struct GetIntegrationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntegrationsResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -1987,7 +2038,7 @@ pub struct GetModelRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetModelResponse { ///

The content-type for the model, for example, "application/json".

#[serde(rename = "ContentType")] @@ -2023,7 +2074,7 @@ pub struct GetModelTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetModelTemplateResponse { ///

The template value.

#[serde(rename = "Value")] @@ -2048,7 +2099,7 @@ pub struct GetModelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetModelsResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -2072,7 +2123,7 @@ pub struct GetRouteRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRouteResponse { ///

Specifies whether an API key is required for this route.

#[serde(rename = "ApiKeyRequired")] @@ -2150,7 +2201,7 @@ pub struct GetRouteResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRouteResponseResponse { ///

Represents the model selection expression of a route response.

#[serde(rename = "ModelSelectionExpression")] @@ -2194,7 +2245,7 @@ pub struct GetRouteResponsesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRouteResponsesResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -2224,7 +2275,7 @@ pub struct GetRoutesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRoutesResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -2248,7 +2299,7 @@ pub struct GetStageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetStageResponse { ///

Settings for logging access in this stage.

#[serde(rename = "AccessLogSettings")] @@ -2293,6 +2344,10 @@ pub struct GetStageResponse { #[serde(rename = "StageVariables")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_variables: Option<::std::collections::HashMap>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2312,7 +2367,7 @@ pub struct GetStagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetStagesResponse { ///

The elements from this collection.

#[serde(rename = "Items")] @@ -2325,9 +2380,23 @@ pub struct GetStagesResponse { pub next_token: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetTagsRequest { + #[serde(rename = "ResourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetTagsResponse { + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + ///

Represents an integration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Integration { ///

The connection ID.

#[serde(rename = "ConnectionId")] @@ -2450,7 +2519,7 @@ pub struct Integration { ///

Represents an integration response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IntegrationResponse { ///

Specifies how to handle response payload content type conversions. Supported /// values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the @@ -2498,7 +2567,7 @@ pub struct IntegrationResponse { ///

Represents a data model for an API. See Create Models and Mapping Templates for Request and Response /// Mappings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Model { ///

The content-type for the model, for example, "application/json".

#[serde(rename = "ContentType")] @@ -2534,7 +2603,7 @@ pub struct ParameterConstraints { ///

Represents a route.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Route { ///

Specifies whether an API key is required for this route.

#[serde(rename = "ApiKeyRequired")] @@ -2599,7 +2668,7 @@ pub struct Route { ///

Represents a route response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RouteResponse { ///

Represents the model selection expression of a route response.

#[serde(rename = "ModelSelectionExpression")] @@ -2653,7 +2722,7 @@ pub struct RouteSettings { ///

Represents an API stage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Stage { ///

Settings for logging access in this stage.

#[serde(rename = "AccessLogSettings")] @@ -2697,6 +2766,35 @@ pub struct Stage { #[serde(rename = "StageVariables")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_variables: Option<::std::collections::HashMap>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + ///

The AWS resource ARN.

+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + ///

The AWS resource ARN.

+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///

The tag keys to delete.

+ #[serde(rename = "TagKeys")] + pub tag_keys: Vec, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2721,7 +2819,7 @@ pub struct UpdateApiMappingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateApiMappingResponse { ///

The API identifier.

#[serde(rename = "ApiId")] @@ -2773,7 +2871,7 @@ pub struct UpdateApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateApiResponse { ///

The URI of the API, of the form {api-id}.execute-api.{region}.amazonaws.com. The /// stage name is typically appended to this URI to form a complete path to a deployed @@ -2813,6 +2911,10 @@ pub struct UpdateApiResponse { #[serde(rename = "RouteSelectionExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub route_selection_expression: Option, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

A version identifier for the API.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2899,7 +3001,7 @@ pub struct UpdateAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAuthorizerResponse { ///

Specifies the required credentials as an IAM role for API Gateway to invoke the /// authorizer. To specify an IAM role for API Gateway to assume, use the role's Amazon @@ -2987,7 +3089,7 @@ pub struct UpdateDeploymentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeploymentResponse { ///

The date and time when the Deployment resource was created.

#[serde(rename = "CreatedDate")] @@ -3024,7 +3126,7 @@ pub struct UpdateDomainNameRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDomainNameResponse { ///

The API mapping selection expression.

#[serde(rename = "ApiMappingSelectionExpression")] @@ -3038,6 +3140,10 @@ pub struct UpdateDomainNameResponse { #[serde(rename = "DomainNameConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub domain_name_configurations: Option>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3160,7 +3266,7 @@ pub struct UpdateIntegrationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIntegrationResponse { ///

The connection ID.

#[serde(rename = "ConnectionId")] @@ -3340,7 +3446,7 @@ pub struct UpdateIntegrationResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIntegrationResponseResponse { ///

Specifies how to handle response payload content type conversions. Supported /// values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the @@ -3414,7 +3520,7 @@ pub struct UpdateModelRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateModelResponse { ///

The content-type for the model, for example, "application/json".

#[serde(rename = "ContentType")] @@ -3501,7 +3607,7 @@ pub struct UpdateRouteRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRouteResponse { ///

Specifies whether an API key is required for this route.

#[serde(rename = "ApiKeyRequired")] @@ -3595,7 +3701,7 @@ pub struct UpdateRouteResponseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRouteResponseResponse { ///

Represents the model selection expression of a route response.

#[serde(rename = "ModelSelectionExpression")] @@ -3660,7 +3766,7 @@ pub struct UpdateStageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateStageResponse { ///

Settings for logging access in this stage.

#[serde(rename = "AccessLogSettings")] @@ -3705,6 +3811,10 @@ pub struct UpdateStageResponse { #[serde(rename = "StageVariables")] #[serde(skip_serializing_if = "Option::is_none")] pub stage_variables: Option<::std::collections::HashMap>, + ///

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } /// Errors returned by CreateApi @@ -5771,6 +5881,171 @@ impl Error for GetStagesError { } } } +/// Errors returned by GetTags +#[derive(Debug, PartialEq)] +pub enum GetTagsError { + ///

The request is not valid, for example, the input is incomplete or incorrect. See + /// the accompanying error message for details.

+ BadRequest(String), + ///

The requested operation would cause a conflict with the current state of a service + /// resource associated with the request. Resolve the conflict before retrying this + /// request. See the accompanying error message for details.

+ Conflict(String), + ///

The resource specified in the request was not found. See the message + /// field for more information.

+ NotFound(String), + ///

A limit has been exceeded. See the accompanying error message for details.

+ TooManyRequests(String), +} + +impl GetTagsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(GetTagsError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(GetTagsError::Conflict(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(GetTagsError::NotFound(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(GetTagsError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetTagsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetTagsError { + fn description(&self) -> &str { + match *self { + GetTagsError::BadRequest(ref cause) => cause, + GetTagsError::Conflict(ref cause) => cause, + GetTagsError::NotFound(ref cause) => cause, + GetTagsError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

The request is not valid, for example, the input is incomplete or incorrect. See + /// the accompanying error message for details.

+ BadRequest(String), + ///

The requested operation would cause a conflict with the current state of a service + /// resource associated with the request. Resolve the conflict before retrying this + /// request. See the accompanying error message for details.

+ Conflict(String), + ///

The resource specified in the request was not found. See the message + /// field for more information.

+ NotFound(String), + ///

A limit has been exceeded. See the accompanying error message for details.

+ TooManyRequests(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(TagResourceError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(TagResourceError::Conflict(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(TagResourceError::NotFound(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(TagResourceError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::BadRequest(ref cause) => cause, + TagResourceError::Conflict(ref cause) => cause, + TagResourceError::NotFound(ref cause) => cause, + TagResourceError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

The request is not valid, for example, the input is incomplete or incorrect. See + /// the accompanying error message for details.

+ BadRequest(String), + ///

The requested operation would cause a conflict with the current state of a service + /// resource associated with the request. Resolve the conflict before retrying this + /// request. See the accompanying error message for details.

+ Conflict(String), + ///

The resource specified in the request was not found. See the message + /// field for more information.

+ NotFound(String), + ///

A limit has been exceeded. See the accompanying error message for details.

+ TooManyRequests(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UntagResourceError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UntagResourceError::Conflict(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UntagResourceError::NotFound(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(UntagResourceError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::BadRequest(ref cause) => cause, + UntagResourceError::Conflict(ref cause) => cause, + UntagResourceError::NotFound(ref cause) => cause, + UntagResourceError::TooManyRequests(ref cause) => cause, + } + } +} /// Errors returned by UpdateApi #[derive(Debug, PartialEq)] pub enum UpdateApiError { @@ -6625,6 +6900,18 @@ pub trait ApiGatewayV2 { input: GetStagesRequest, ) -> RusotoFuture; + ///

Gets the tags for an API.

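A hedged usage sketch of this method — the crate path and ARN are illustrative, and `sync()` reflects the blocking style used by this Rusoto generation:

```rust
use rusoto_core::{Region, RusotoError};
// Assuming the crate is imported as, e.g.:
// use rusoto_apigatewayv2::{ApiGatewayV2, ApiGatewayV2Client, GetTagsError, GetTagsRequest};

let client = ApiGatewayV2Client::new(Region::UsEast1);
let result = client
    .get_tags(GetTagsRequest {
        resource_arn: "arn:aws:apigateway:us-east-1::/apis/example".to_string(), // illustrative ARN
    })
    .sync();

match result {
    Ok(response) => println!("tags: {:?}", response.tags),
    Err(RusotoError::Service(GetTagsError::NotFound(msg))) => eprintln!("not found: {}", msg),
    Err(err) => eprintln!("error: {}", err),
}
```

The refactored constructors further below also allow `ApiGatewayV2Client::new_with_client(client, region)` when a shared `Client` should be reused.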
+ fn get_tags(&self, input: GetTagsRequest) -> RusotoFuture; + + ///

Tags an API Gateway resource.

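A matching sketch for tagging, reusing the hypothetical `client` and ARN from the example above; a successful call yields an empty `TagResourceResponse` (the implementation below expects HTTP 201):

```rust
let mut tags = ::std::collections::HashMap::new();
tags.insert("Team".to_string(), "platform".to_string()); // illustrative tag

client
    .tag_resource(TagResourceRequest {
        resource_arn: "arn:aws:apigateway:us-east-1::/apis/example".to_string(),
        tags: Some(tags),
    })
    .sync()
    .expect("tagging failed");
```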
+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture; + + ///

Untags an API Gateway resource.

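And the untagging counterpart, again with illustrative values; only the keys are needed, and the implementation below sends them as `tagKeys` query parameters:

```rust
client
    .untag_resource(UntagResourceRequest {
        resource_arn: "arn:aws:apigateway:us-east-1::/apis/example".to_string(),
        tag_keys: vec!["Team".to_string()],
    })
    .sync()
    .expect("untagging failed");
```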
+ fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError>; + ///

Updates an Api resource.

fn update_api( &self, @@ -6703,10 +6990,7 @@ impl ApiGatewayV2Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ApiGatewayV2Client { - ApiGatewayV2Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -6720,10 +7004,14 @@ impl ApiGatewayV2Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ApiGatewayV2Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ApiGatewayV2Client { + ApiGatewayV2Client { client, region } } } @@ -8213,6 +8501,95 @@ impl ApiGatewayV2 for ApiGatewayV2Client { }) } + ///

Gets the tags for an API.

+ fn get_tags(&self, input: GetTagsRequest) -> RusotoFuture { + let request_uri = format!("/v2/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("GET", "apigateway", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetTagsError::from_response(response))), + ) + } + }) + } + + ///

Tags an API Gateway resource.

+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture { + let request_uri = format!("/v2/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("POST", "apigateway", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let encoded = Some(serde_json::to_vec(&input).unwrap()); + request.set_payload(encoded); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 201 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(TagResourceError::from_response(response))), + ) + } + }) + } + + ///

Untags an API Gateway resource.

+ fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError> { + let request_uri = format!("/v2/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("DELETE", "apigateway", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + for item in input.tag_keys.iter() { + params.put("tagKeys", item); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 204 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = ::std::mem::drop(response); + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UntagResourceError::from_response(response))), + ) + } + }) + } + ///

Updates an Api resource.

fn update_api( &self, diff --git a/rusoto/services/application-autoscaling/Cargo.toml b/rusoto/services/application-autoscaling/Cargo.toml index 7d8ac661857..5b2fb47e000 100644 --- a/rusoto/services/application-autoscaling/Cargo.toml +++ b/rusoto/services/application-autoscaling/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_application_autoscaling" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/application-autoscaling/README.md b/rusoto/services/application-autoscaling/README.md index 5d329789026..1118af60df3 100644 --- a/rusoto/services/application-autoscaling/README.md +++ b/rusoto/services/application-autoscaling/README.md @@ -23,9 +23,16 @@ To use `rusoto_application_autoscaling` in your application, add it as a depende ```toml [dependencies] -rusoto_application_autoscaling = "0.40.0" +rusoto_application_autoscaling = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
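To illustrate the new `serialize_structs` feature added above, a hedged sketch: with the feature enabled in Cargo.toml, output structs derive `Serialize` and can be fed to serde_json. The response type shown is illustrative, assuming the usual `Default` derive:

```rust
// Cargo.toml (assumed):
// rusoto_application_autoscaling = { version = "0.41.0", features = ["serialize_structs"] }
let response = DescribeScalableTargetsResponse::default();
let json = serde_json::to_string(&response).expect("serialization failed");
println!("{}", json);
```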
diff --git a/rusoto/services/application-autoscaling/src/custom/custom_tests.rs b/rusoto/services/application-autoscaling/src/custom/custom_tests.rs index 4f6a805902b..8f3626d156d 100644 --- a/rusoto/services/application-autoscaling/src/custom/custom_tests.rs +++ b/rusoto/services/application-autoscaling/src/custom/custom_tests.rs @@ -2,8 +2,8 @@ extern crate rusoto_mock; use crate::generated::*; -use rusoto_core::Region; use self::rusoto_mock::*; +use rusoto_core::Region; #[test] // regression test for #1002 @@ -11,7 +11,8 @@ fn register_scalable_target_happy_path() { let body = "{}".to_string(); let mock = MockRequestDispatcher::with_status(200).with_body(&body); - let client = ApplicationAutoScalingClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); + let client = + ApplicationAutoScalingClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let result = client.register_scalable_target(Default::default()).sync(); result.expect("Couldn't parse register_scalable_target"); @@ -23,7 +24,8 @@ fn register_scalable_target_returs_empty_body() { let body = "".to_string(); let mock = MockRequestDispatcher::with_status(200).with_body(&body); - let client = ApplicationAutoScalingClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); + let client = + ApplicationAutoScalingClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let result = client.register_scalable_target(Default::default()).sync(); result.expect("Couldn't parse register_scalable_target"); diff --git a/rusoto/services/application-autoscaling/src/generated.rs b/rusoto/services/application-autoscaling/src/generated.rs index d063fa5efbb..f4989640d73 100644 --- a/rusoto/services/application-autoscaling/src/generated.rs +++ b/rusoto/services/application-autoscaling/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Represents a CloudWatch alarm associated with a scaling policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Alarm { ///

The Amazon Resource Name (ARN) of the alarm.

#[serde(rename = "AlarmARN")] @@ -63,56 +62,56 @@ pub struct DeleteScalingPolicyRequest { ///

The name of the scaling policy.

#[serde(rename = "PolicyName")] pub policy_name: String, - ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

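Putting the identifier formats above together, a minimal sketch for an ECS service — the resource ID and scalable dimension come straight from the documented examples, while the policy name is illustrative:

```rust
let request = DeleteScalingPolicyRequest {
    policy_name: "my-scaling-policy".to_string(), // illustrative
    resource_id: "service/default/sample-webapp".to_string(),
    scalable_dimension: "ecs:service:DesiredCount".to_string(),
    service_namespace: "ecs".to_string(),
};
```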
#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteScalingPolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteScheduledActionRequest { - ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, ///

The name of the scheduled action.

#[serde(rename = "ScheduledActionName")] pub scheduled_action_name: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteScheduledActionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeregisterScalableTargetRequest { - ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterScalableTargetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -125,21 +124,21 @@ pub struct DescribeScalableTargetsRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceIds")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_ids: Option>, - ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] #[serde(skip_serializing_if = "Option::is_none")] pub scalable_dimension: Option, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScalableTargetsResponse { ///

The token required to get the next set of results. This value is null if there are no more results to return.

#[serde(rename = "NextToken")] @@ -161,21 +160,21 @@ pub struct DescribeScalingActivitiesRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_id: Option, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] #[serde(skip_serializing_if = "Option::is_none")] pub scalable_dimension: Option, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScalingActivitiesResponse { ///

The token required to get the next set of results. This value is null if there are no more results to return.

#[serde(rename = "NextToken")] @@ -201,21 +200,21 @@ pub struct DescribeScalingPoliciesRequest { #[serde(rename = "PolicyNames")] #[serde(skip_serializing_if = "Option::is_none")] pub policy_names: Option>, - ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_id: Option, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] #[serde(skip_serializing_if = "Option::is_none")] pub scalable_dimension: Option, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScalingPoliciesResponse { ///

The token required to get the next set of results. This value is null if there are no more results to return.

#[serde(rename = "NextToken")] @@ -237,11 +236,11 @@ pub struct DescribeScheduledActionsRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_id: Option, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] #[serde(skip_serializing_if = "Option::is_none")] pub scalable_dimension: Option, @@ -249,13 +248,13 @@ pub struct DescribeScheduledActionsRequest { #[serde(rename = "ScheduledActionNames")] #[serde(skip_serializing_if = "Option::is_none")] pub scheduled_action_names: Option>, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScheduledActionsResponse { ///

The token required to get the next set of results. This value is null if there are no more results to return.

#[serde(rename = "NextToken")] @@ -281,10 +280,10 @@ pub struct MetricDimension { ///

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PredefinedMetricSpecification { - ///

The metric type. The ALBRequestCountPerTarget metric type applies only to Spot fleet requests and ECS services.

+ ///

The metric type. The ALBRequestCountPerTarget metric type applies only to Spot Fleet requests and ECS services.

#[serde(rename = "PredefinedMetricType")] pub predefined_metric_type: String, - ///

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot fleet request or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

+ ///

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot Fleet request or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

#[serde(rename = "ResourceLabel")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_label: Option, @@ -295,17 +294,17 @@ pub struct PutScalingPolicyRequest { ///

The name of the scaling policy.

#[serde(rename = "PolicyName")] pub policy_name: String, - ///

The policy type. This parameter is required if you are creating a scaling policy.

For information on which services do not support StepScaling or TargetTrackingScaling, see the information about Limits in Step Scaling Policies and Target Tracking Scaling Policies in the Application Auto Scaling User Guide.

+ ///

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR or AppStream

StepScaling—Not supported for Amazon DynamoDB

For more information, see Step Scaling Policies for Application Auto Scaling and Target Tracking Scaling Policies for Application Auto Scaling in the Application Auto Scaling User Guide.

#[serde(rename = "PolicyType")] #[serde(skip_serializing_if = "Option::is_none")] pub policy_type: Option, - ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, ///

A step scaling policy.

This parameter is required if you are creating a policy and the policy type is StepScaling.

@@ -320,7 +319,7 @@ pub struct PutScalingPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutScalingPolicyResponse { ///

The CloudWatch alarms created for the target tracking scaling policy.

#[serde(rename = "Alarms")] @@ -337,10 +336,10 @@ pub struct PutScheduledActionRequest { #[serde(rename = "EndTime")] #[serde(skip_serializing_if = "Option::is_none")] pub end_time: Option, - ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, ///

The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

@@ -354,7 +353,7 @@ pub struct PutScheduledActionRequest { ///

The name of the scheduled action.

#[serde(rename = "ScheduledActionName")] pub scheduled_action_name: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, ///

The date and time for the scheduled action to start.

@@ -364,41 +363,45 @@ pub struct PutScheduledActionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutScheduledActionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RegisterScalableTargetRequest { - ///
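A sketch of creating a scheduled action that raises capacity during a recurring window; the `schedule` field and `ScalableTargetAction` struct are assumed from the service model (only their documentation appears in this hunk), and the cron expression and capacities are illustrative:

```rust
use rusoto_core::Region;
use rusoto_applicationautoscaling::{
    ApplicationAutoScaling, ApplicationAutoScalingClient, PutScheduledActionRequest,
    ScalableTargetAction,
};

fn main() {
    let client = ApplicationAutoScalingClient::new(Region::UsEast1);
    let request = PutScheduledActionRequest {
        scheduled_action_name: "scale-up-for-peak".to_owned(), // hypothetical name
        schedule: Some("cron(0 18 * * ? *)".to_owned()),
        resource_id: "table/my-table".to_owned(),
        scalable_dimension: "dynamodb:table:ReadCapacityUnits".to_owned(),
        service_namespace: "dynamodb".to_owned(),
        // During the scheduled window, scale out to at least 10 and cap at 50.
        scalable_target_action: Some(ScalableTargetAction {
            min_capacity: Some(10),
            max_capacity: Some(50),
        }),
        ..Default::default() // StartTime/EndTime left unset
    };
    match client.put_scheduled_action(request).sync() {
        Ok(_) => println!("scheduled action created"),
        Err(e) => eprintln!("put_scheduled_action failed: {}", e),
    }
}
```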

The maximum value to scale to in response to a scale-out event. This parameter is required to register a scalable target.

+ ///

The maximum value to scale to in response to a scale-out event. MaxCapacity is required to register a scalable target.

#[serde(rename = "MaxCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub max_capacity: Option, - ///

The minimum value to scale to in response to a scale-in event. This parameter is required to register a scalable target.

+ ///

The minimum value to scale to in response to a scale-in event. MinCapacity is required to register a scalable target.

#[serde(rename = "MinCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub min_capacity: Option, - ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

+ ///

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required, and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

#[serde(rename = "RoleARN")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, + ///

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

  • For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended.

  • For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended.

  • For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended.

For more information, see Suspend and Resume Application Auto Scaling in the Application Auto Scaling User Guide.
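As a rough sketch of how this new field might be used from Rusoto (the region, resource names, and the blocking `.sync()` call are illustrative assumptions, not part of this diff), suspending scale-in for an ECS service could look like:

```rust
use rusoto_application_autoscaling::{
    ApplicationAutoScaling, ApplicationAutoScalingClient, RegisterScalableTargetRequest,
    SuspendedState,
};
use rusoto_core::Region;

fn suspend_scale_in() -> Result<(), Box<dyn std::error::Error>> {
    let client = ApplicationAutoScalingClient::new(Region::UsEast1);
    client
        .register_scalable_target(RegisterScalableTargetRequest {
            resource_id: "service/default/sample-webapp".to_owned(),
            scalable_dimension: "ecs:service:DesiredCount".to_owned(),
            service_namespace: "ecs".to_owned(),
            // Suspend only scale-in; scale-out and scheduled scaling stay active.
            suspended_state: Some(SuspendedState {
                dynamic_scaling_in_suspended: Some(true),
                ..Default::default()
            }),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```

Resuming would be the same call with the flag set back to `Some(false)`.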

+ #[serde(rename = "SuspendedState")] + #[serde(skip_serializing_if = "Option::is_none")] + pub suspended_state: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterScalableTargetResponse {} ///

Represents a scalable target.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScalableTarget { ///

The Unix timestamp for when the scalable target was created.

#[serde(rename = "CreationTime")] @@ -409,18 +412,21 @@ pub struct ScalableTarget { ///

The minimum value to scale to in response to a scale-in event.

#[serde(rename = "MinCapacity")] pub min_capacity: i64, - ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, ///

The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

#[serde(rename = "RoleARN")] pub role_arn: String, - ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, + #[serde(rename = "SuspendedState")] + #[serde(skip_serializing_if = "Option::is_none")] + pub suspended_state: Option, } ///

Represents the minimum and maximum capacity for a scheduled action.

@@ -438,7 +444,7 @@ pub struct ScalableTargetAction { ///

Represents a scaling activity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScalingActivity { ///

The unique identifier of the scaling activity.

#[serde(rename = "ActivityId")] @@ -457,13 +463,13 @@ pub struct ScalingActivity { #[serde(rename = "EndTime")] #[serde(skip_serializing_if = "Option::is_none")] pub end_time: Option, - ///

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, ///

The Unix timestamp for when the scaling activity began.

@@ -480,7 +486,7 @@ pub struct ScalingActivity { ///

Represents a scaling policy to use with Application Auto Scaling.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScalingPolicy { ///

The CloudWatch alarms associated with the scaling policy.

#[serde(rename = "Alarms")] @@ -498,13 +504,13 @@ pub struct ScalingPolicy { ///

The scaling policy type.

#[serde(rename = "PolicyType")] pub policy_type: String, - ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] pub scalable_dimension: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, ///

A step scaling policy.

@@ -520,7 +526,7 @@ pub struct ScalingPolicy { ///

Represents a scheduled action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScheduledAction { ///

The date and time that the scheduled action was created.

#[serde(rename = "CreationTime")] @@ -529,10 +535,10 @@ pub struct ScheduledAction { #[serde(rename = "EndTime")] #[serde(skip_serializing_if = "Option::is_none")] pub end_time: Option, - ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

+ ///

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

+ ///

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

#[serde(rename = "ScalableDimension")] #[serde(skip_serializing_if = "Option::is_none")] pub scalable_dimension: Option, @@ -549,7 +555,7 @@ pub struct ScheduledAction { ///

The name of the scheduled action.

#[serde(rename = "ScheduledActionName")] pub scheduled_action_name: String, - ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

+ ///

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

#[serde(rename = "ServiceNamespace")] pub service_namespace: String, ///

The date and time that the action is scheduled to begin.

@@ -577,7 +583,7 @@ pub struct StepAdjustment { ///

Represents a step scaling policy configuration to use with Application Auto Scaling.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StepScalingPolicyConfiguration { - ///

The adjustment type, which specifies how the ScalingAdjustment parameter in a StepAdjustment is interpreted.

+ ///

Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute number or a percentage of the current capacity.

#[serde(rename = "AdjustmentType")] #[serde(skip_serializing_if = "Option::is_none")] pub adjustment_type: Option, @@ -599,6 +605,23 @@ pub struct StepScalingPolicyConfiguration { pub step_adjustments: Option>, } +///

Specifies whether the scaling activities for a scalable target are in a suspended state.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SuspendedState { + ///

Whether scale in by a target tracking scaling policy or a step scaling policy is suspended. Set the value to true if you don't want Application Auto Scaling to remove capacity when a scaling policy is triggered. The default is false.

+ #[serde(rename = "DynamicScalingInSuspended")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dynamic_scaling_in_suspended: Option, + ///

Whether scale out by a target tracking scaling policy or a step scaling policy is suspended. Set the value to true if you don't want Application Auto Scaling to add capacity when a scaling policy is triggered. The default is false.

+ #[serde(rename = "DynamicScalingOutSuspended")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dynamic_scaling_out_suspended: Option, + ///

Whether scheduled scaling is suspended. Set the value to true if you don't want Application Auto Scaling to add or remove capacity by initiating scheduled actions. The default is false.

+ #[serde(rename = "ScheduledScalingSuspended")] + #[serde(skip_serializing_if = "Option::is_none")] + pub scheduled_scaling_suspended: Option, +} + ///

Represents a target tracking scaling policy configuration to use with Application Auto Scaling.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TargetTrackingScalingPolicyConfiguration { @@ -1151,43 +1174,43 @@ impl Error for RegisterScalableTargetError { } /// Trait representing the capabilities of the Application Auto Scaling API. Application Auto Scaling clients implement this trait. pub trait ApplicationAutoScaling { - ///

Deletes the specified Application Auto Scaling scaling policy.

Deleting a policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

To create a scaling policy or update an existing one, see PutScalingPolicy.

+ ///

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

To create a scaling policy or update an existing one, see PutScalingPolicy.

fn delete_scaling_policy( &self, input: DeleteScalingPolicyRequest, ) -> RusotoFuture<DeleteScalingPolicyResponse, DeleteScalingPolicyError>; - ///

Deletes the specified Application Auto Scaling scheduled action.

+ ///

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a Scheduled Action in the Application Auto Scaling User Guide.

fn delete_scheduled_action( &self, input: DeleteScheduledActionRequest, ) -> RusotoFuture<DeleteScheduledActionResponse, DeleteScheduledActionError>; - ///

Deregisters a scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

+ ///

Deregisters an Application Auto Scaling scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

fn deregister_scalable_target( &self, input: DeregisterScalableTargetRequest, ) -> RusotoFuture<DeregisterScalableTargetResponse, DeregisterScalableTargetError>; - ///

Gets information about the scalable targets in the specified namespace.

You can filter the results using the ResourceIds and ScalableDimension parameters.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

+ ///

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.
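A sketch of the filtered form, reusing the example identifiers from the docs above (client construction and error handling are assumed, as in the earlier sketches):

```rust
use rusoto_application_autoscaling::{
    ApplicationAutoScaling, ApplicationAutoScalingClient, DescribeScalableTargetsRequest,
};

fn list_ecs_targets(
    client: &ApplicationAutoScalingClient,
) -> Result<(), Box<dyn std::error::Error>> {
    let response = client
        .describe_scalable_targets(DescribeScalableTargetsRequest {
            service_namespace: "ecs".to_owned(),
            // Optional filters; omit them to list every target in the namespace.
            resource_ids: Some(vec!["service/default/sample-webapp".to_owned()]),
            scalable_dimension: Some("ecs:service:DesiredCount".to_owned()),
            ..Default::default()
        })
        .sync()?;
    println!("{:?}", response.scalable_targets);
    Ok(())
}
```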

fn describe_scalable_targets( &self, input: DescribeScalableTargetsRequest, ) -> RusotoFuture<DescribeScalableTargetsResponse, DescribeScalableTargetsError>; - ///

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using the ResourceId and ScalableDimension parameters.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

+ ///

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

fn describe_scaling_activities( &self, input: DescribeScalingActivitiesRequest, ) -> RusotoFuture<DescribeScalingActivitiesResponse, DescribeScalingActivitiesError>; - ///

Describes the scaling policies for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and PolicyNames parameters.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

+ ///

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

fn describe_scaling_policies( &self, input: DescribeScalingPoliciesRequest, ) -> RusotoFuture<DescribeScalingPoliciesResponse, DescribeScalingPoliciesError>; - ///

Describes the scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

+ ///

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

fn describe_scheduled_actions( &self, input: DescribeScheduledActionsRequest, @@ -1205,7 +1228,7 @@ pub trait ApplicationAutoScaling { input: PutScheduledActionRequest, ) -> RusotoFuture<PutScheduledActionResponse, PutScheduledActionError>; - ///

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Each scalable target has a resource ID, scalable dimension, and namespace, as well as values for minimum and maximum capacity.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget.

+ ///

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling will not scale capacity to values that are outside of this range.

To update a scalable target, specify the parameter that you want to change as well as the following parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.
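Because unspecified parameters are left unchanged, a follow-up call can adjust only the capacity bounds. A minimal sketch with illustrative values:

```rust
use rusoto_application_autoscaling::{
    ApplicationAutoScaling, ApplicationAutoScalingClient, RegisterScalableTargetRequest,
};

fn widen_capacity(
    client: &ApplicationAutoScalingClient,
) -> Result<(), Box<dyn std::error::Error>> {
    client
        .register_scalable_target(RegisterScalableTargetRequest {
            resource_id: "table/my-table".to_owned(),
            scalable_dimension: "dynamodb:table:ReadCapacityUnits".to_owned(),
            service_namespace: "dynamodb".to_owned(),
            // Only the bounds change; RoleARN, SuspendedState, etc. are untouched.
            min_capacity: Some(5),
            max_capacity: Some(100),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```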

fn register_scalable_target( &self, input: RegisterScalableTargetRequest, @@ -1223,10 +1246,7 @@ impl ApplicationAutoScalingClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ApplicationAutoScalingClient { - ApplicationAutoScalingClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1240,15 +1260,19 @@ impl ApplicationAutoScalingClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ApplicationAutoScalingClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ApplicationAutoScalingClient { + ApplicationAutoScalingClient { client, region } } } impl ApplicationAutoScaling for ApplicationAutoScalingClient { - ///

Deletes the specified Application Auto Scaling scaling policy.

Deleting a policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

To create a scaling policy or update an existing one, see PutScalingPolicy.

+ ///

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

To create a scaling policy or update an existing one, see PutScalingPolicy.

fn delete_scaling_policy( &self, input: DeleteScalingPolicyRequest, @@ -1279,7 +1303,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Deletes the specified Application Auto Scaling scheduled action.

+ ///

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a Scheduled Action in the Application Auto Scaling User Guide.

fn delete_scheduled_action( &self, input: DeleteScheduledActionRequest, @@ -1310,7 +1334,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Deregisters a scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

+ ///

Deregisters an Application Auto Scaling scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

fn deregister_scalable_target( &self, input: DeregisterScalableTargetRequest, @@ -1339,7 +1363,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Gets information about the scalable targets in the specified namespace.

You can filter the results using the ResourceIds and ScalableDimension parameters.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

+ ///

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

fn describe_scalable_targets( &self, input: DescribeScalableTargetsRequest, @@ -1368,7 +1392,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using the ResourceId and ScalableDimension parameters.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

+ ///

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

fn describe_scaling_activities( &self, input: DescribeScalingActivitiesRequest, @@ -1397,7 +1421,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Describes the scaling policies for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and PolicyNames parameters.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

+ ///

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

fn describe_scaling_policies( &self, input: DescribeScalingPoliciesRequest, @@ -1426,7 +1450,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Describes the scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

+ ///

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

fn describe_scheduled_actions( &self, input: DescribeScheduledActionsRequest, @@ -1513,7 +1537,7 @@ impl ApplicationAutoScaling for ApplicationAutoScalingClient { }) } - ///

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Each scalable target has a resource ID, scalable dimension, and namespace, as well as values for minimum and maximum capacity.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget.

+ ///

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling will not scale capacity to values that are outside of this range.

To update a scalable target, specify the parameter that you want to change as well as the following parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.

fn register_scalable_target( &self, input: RegisterScalableTargetRequest, diff --git a/rusoto/services/application-autoscaling/src/lib.rs b/rusoto/services/application-autoscaling/src/lib.rs index fa0826337a4..f74fe485e6e 100644 --- a/rusoto/services/application-autoscaling/src/lib.rs +++ b/rusoto/services/application-autoscaling/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

With Application Auto Scaling, you can configure automatic scaling for your scalable resources. You can use Application Auto Scaling to accomplish the following tasks:

  • Define scaling policies to automatically scale your AWS or custom resources

  • Scale your resources in response to CloudWatch alarms

  • Schedule one-time or recurring scaling actions

  • View the history of your scaling events

Application Auto Scaling can scale the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

+//!

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget action for any Application Auto Scaling scalable target. You can suspend and resume, individually or in combination, scale-out activities triggered by a scaling policy, scale-in activities triggered by a scaling policy, and scheduled scaling.

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

//! //! If you're using the service, you're probably looking for [ApplicationAutoScalingClient](struct.ApplicationAutoScalingClient.html) and [ApplicationAutoScaling](trait.ApplicationAutoScaling.html). diff --git a/rusoto/services/appmesh/Cargo.toml b/rusoto/services/appmesh/Cargo.toml new file mode 100644 index 00000000000..007d70da131 --- /dev/null +++ b/rusoto/services/appmesh/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - AWS App Mesh @ 2019-01-25" +documentation = "https://docs.rs/rusoto_appmesh" +keywords = ["AWS", "Amazon", "appmesh"] +license = "MIT" +name = "rusoto_appmesh" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/appmesh/README.md b/rusoto/services/appmesh/README.md new file mode 100644 index 00000000000..ea47fdfe915 --- /dev/null +++ b/rusoto/services/appmesh/README.md @@ -0,0 +1,52 @@ + +# Rusoto AppMesh +Rust SDK for AWS App Mesh + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_appmesh` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_appmesh = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. + +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. 
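As a sketch of what the `serialize_structs` feature listed under the crate features above enables (it is wired in via the `cfg_attr` changes throughout this diff), assuming the feature and a `serde_json` dependency are declared in the consumer's own Cargo.toml:

```rust
use rusoto_appmesh::CreateMeshOutput;

// Compiles only with `serialize_structs` enabled; without it,
// CreateMeshOutput derives Serialize under #[cfg(test)] alone.
fn to_json(output: &CreateMeshOutput) -> serde_json::Result<String> {
    serde_json::to_string_pretty(output)
}
```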
+ +[api-documentation]: https://docs.rs/rusoto_appmesh "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/appmesh/src/custom/mod.rs b/rusoto/services/appmesh/src/custom/mod.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/rusoto/services/appmesh/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/appmesh/src/generated.rs b/rusoto/services/appmesh/src/generated.rs new file mode 100644 index 00000000000..8845d7f0389 --- /dev/null +++ b/rusoto/services/appmesh/src/generated.rs @@ -0,0 +1,4915 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::param::{Params, ServiceParams}; +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +///
+/// <p>An object representing the access logging information for a virtual node.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AccessLog {
+    /// <p>The file object to send virtual node access logs to.</p>
+    #[serde(rename = "file")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub file: Option<FileAccessLog>,
+}
+
+/// <p>An object representing the AWS Cloud Map attribute information for your virtual node.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsCloudMapInstanceAttribute {
+    /// <p>The name of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service instance
+    /// that contains the specified key and value is returned.</p>
+    #[serde(rename = "key")]
+    pub key: String,
+    /// <p>The value of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service
+    /// instance that contains the specified key and value is returned.</p>
+    #[serde(rename = "value")]
+    pub value: String,
+}
+
+/// <p>An object representing the AWS Cloud Map service discovery information for your virtual
+/// node.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsCloudMapServiceDiscovery {
+    /// <p>A string map that contains attributes with values that you can use to filter instances
+    /// by any custom attribute that you specified when you registered the instance. Only instances
+    /// that match all of the specified key/value pairs will be returned.</p>
+    #[serde(rename = "attributes")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub attributes: Option<Vec<AwsCloudMapInstanceAttribute>>,
+    /// <p>The name of the AWS Cloud Map namespace to use.</p>
+    #[serde(rename = "namespaceName")]
+    pub namespace_name: String,
+    /// <p>The name of the AWS Cloud Map service to use.</p>
+    #[serde(rename = "serviceName")]
+    pub service_name: String,
+}
+
+/// <p>An object representing the backends that a virtual node is expected to send outbound
+/// traffic to.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Backend {
+    /// <p>Specifies a virtual service to use as a backend for a virtual node.</p>
+    #[serde(rename = "virtualService")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub virtual_service: Option<VirtualServiceBackend>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateMeshInput {
+    /// <p>Unique, case-sensitive identifier that you provide to ensure the idempotency of the
+    /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.</p>
+    #[serde(rename = "clientToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub client_token: Option<String>,
+    /// <p>The name to use for the service mesh.</p>
+    #[serde(rename = "meshName")]
+    pub mesh_name: String,
+    /// <p>The service mesh specification to apply.</p>
+    #[serde(rename = "spec")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub spec: Option<MeshSpec>,
+    /// <p>Optional metadata that you can apply to the service mesh to assist with categorization
+    /// and organization. Each tag consists of a key and an optional value, both of which you
+    /// define. Tag keys can have a maximum character length of 128 characters, and tag values can have
+    /// a maximum length of 256 characters.</p>
+    #[serde(rename = "tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<Vec<TagRef>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateMeshOutput {
+    /// <p>The full description of your service mesh following the create call.</p>

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh to create the route in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name to use for the route.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The route specification to apply.

+ #[serde(rename = "spec")] + pub spec: RouteSpec, + ///

Optional metadata that you can apply to the route to assist with categorization and + /// organization. Each tag consists of a key and an optional value, both of which you define. + /// Tag keys can have a maximum character length of 128 characters, and tag values can have + /// a maximum length of 256 characters.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The name of the virtual router in which to create the route.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateRouteOutput { + ///

The full description of your mesh following the create call.

+ #[serde(rename = "route")] + pub route: RouteData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateVirtualNodeInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh to create the virtual node in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The virtual node specification to apply.

+ #[serde(rename = "spec")] + pub spec: VirtualNodeSpec, + ///

Optional metadata that you can apply to the virtual node to assist with categorization + /// and organization. Each tag consists of a key and an optional value, both of which you + /// define. Tag keys can have a maximum character length of 128 characters, and tag values can have + /// a maximum length of 256 characters.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The name to use for the virtual node.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateVirtualNodeOutput { + ///

The full description of your virtual node following the create call.

+ #[serde(rename = "virtualNode")] + pub virtual_node: VirtualNodeData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateVirtualRouterInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh to create the virtual router in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The virtual router specification to apply.

+ #[serde(rename = "spec")] + pub spec: VirtualRouterSpec, + ///

Optional metadata that you can apply to the virtual router to assist with categorization + /// and organization. Each tag consists of a key and an optional value, both of which you + /// define. Tag keys can have a maximum character length of 128 characters, and tag values can have + /// a maximum length of 256 characters.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The name to use for the virtual router.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateVirtualRouterOutput { + ///

The full description of your virtual router following the create call.

+ #[serde(rename = "virtualRouter")] + pub virtual_router: VirtualRouterData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateVirtualServiceInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh to create the virtual service in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The virtual service specification to apply.

+ #[serde(rename = "spec")] + pub spec: VirtualServiceSpec, + ///

Optional metadata that you can apply to the virtual service to assist with + /// categorization and organization. Each tag consists of a key and an optional value, both of + /// which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have + /// a maximum length of 256 characters.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The name to use for the virtual service.

+ #[serde(rename = "virtualServiceName")] + pub virtual_service_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateVirtualServiceOutput { + ///

The full description of your virtual service following the create call.

+ #[serde(rename = "virtualService")] + pub virtual_service: VirtualServiceData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteMeshInput { + ///

The name of the service mesh to delete.

+ #[serde(rename = "meshName")] + pub mesh_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteMeshOutput { + ///

The service mesh that was deleted.

+ #[serde(rename = "mesh")] + pub mesh: MeshData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteRouteInput { + ///

The name of the service mesh to delete the route in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the route to delete.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The name of the virtual router to delete the route in.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteRouteOutput { + ///

The route that was deleted.

+ #[serde(rename = "route")] + pub route: RouteData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteVirtualNodeInput { + ///

The name of the service mesh to delete the virtual node in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual node to delete.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteVirtualNodeOutput { + ///

The virtual node that was deleted.

+ #[serde(rename = "virtualNode")] + pub virtual_node: VirtualNodeData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteVirtualRouterInput { + ///

The name of the service mesh to delete the virtual router in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual router to delete.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteVirtualRouterOutput { + ///

The virtual router that was deleted.

+ #[serde(rename = "virtualRouter")] + pub virtual_router: VirtualRouterData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteVirtualServiceInput { + ///

The name of the service mesh to delete the virtual service in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual service to delete.

+ #[serde(rename = "virtualServiceName")] + pub virtual_service_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteVirtualServiceOutput { + ///

The virtual service that was deleted.

+ #[serde(rename = "virtualService")] + pub virtual_service: VirtualServiceData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeMeshInput { + ///

The name of the service mesh to describe.

+ #[serde(rename = "meshName")] + pub mesh_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeMeshOutput { + ///

The full description of your service mesh.

+ #[serde(rename = "mesh")] + pub mesh: MeshData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeRouteInput { + ///

The name of the service mesh that the route resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the route to describe.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The name of the virtual router that the route is associated with.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeRouteOutput { + ///

The full description of your route.

+ #[serde(rename = "route")] + pub route: RouteData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeVirtualNodeInput { + ///

The name of the service mesh that the virtual node resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual node to describe.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeVirtualNodeOutput { + ///

The full description of your virtual node.

+ #[serde(rename = "virtualNode")] + pub virtual_node: VirtualNodeData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeVirtualRouterInput { + ///

The name of the service mesh that the virtual router resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual router to describe.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeVirtualRouterOutput { + ///

The full description of your virtual router.

+ #[serde(rename = "virtualRouter")] + pub virtual_router: VirtualRouterData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeVirtualServiceInput { + ///

The name of the service mesh that the virtual service resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual service to describe.

+ #[serde(rename = "virtualServiceName")] + pub virtual_service_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeVirtualServiceOutput { + ///

The full description of your virtual service.

+ #[serde(rename = "virtualService")] + pub virtual_service: VirtualServiceData, +} + +///

An object representing the DNS service discovery information for your virtual +/// node.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DnsServiceDiscovery { + ///

Specifies the DNS service discovery hostname for the virtual node.

+ #[serde(rename = "hostname")] + pub hostname: String, +} + +///

An object representing the duration between retry attempts.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Duration { + ///

The unit of time between retry attempts.

+ #[serde(rename = "unit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub unit: Option, + ///

The duration of time between retry attempts.

+ #[serde(rename = "value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +///

An object representing the egress filter rules for a service mesh.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EgressFilter { + ///

The egress filter type. By default, the type is DROP_ALL, which allows + /// egress only from virtual nodes to other defined resources in the service mesh (and any + /// traffic to *.amazonaws.com for AWS API calls). You can set the egress filter + /// type to ALLOW_ALL to allow egress to any endpoint inside or outside of the + /// service mesh.

+ #[serde(rename = "type")] + pub type_: String, +} + +///

An object representing an access log file.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct FileAccessLog { + ///

The file path to write access logs to. You can use /dev/stdout to send + /// access logs to standard out and configure your Envoy container to use a log driver, such as + /// awslogs, to export the access logs to a log storage service such as Amazon + /// CloudWatch Logs. You can also specify a path in the Envoy container's file system to write + /// the files to disk.

+ /// + ///
     <note>
+    /// <p>The Envoy process must have write permissions to the path that you specify here.
+    /// Otherwise, Envoy fails to bootstrap properly.</p>
+    /// </note>
+    /// 
+ #[serde(rename = "path")] + pub path: String, +} + +///

An object representing the method and value to match the header value sent with a request. Specify one match method.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HeaderMatchMethod { + ///

The header value sent by the client must match the specified value exactly.

+ #[serde(rename = "exact")] + #[serde(skip_serializing_if = "Option::is_none")] + pub exact: Option, + ///

The header value sent by the client must begin with the specified characters.

+ #[serde(rename = "prefix")] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefix: Option, + ///

The object that specifies the range of numbers that the header value sent by the client must be included in.

+ #[serde(rename = "range")] + #[serde(skip_serializing_if = "Option::is_none")] + pub range: Option, + ///

The header value sent by the client must include the specified characters.

+ #[serde(rename = "regex")] + #[serde(skip_serializing_if = "Option::is_none")] + pub regex: Option, + ///

The header value sent by the client must end with the specified characters.

+ #[serde(rename = "suffix")] + #[serde(skip_serializing_if = "Option::is_none")] + pub suffix: Option, +} + +///

An object representing the health check policy for a virtual node's listener.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HealthCheckPolicy { + ///

The number of consecutive successful health checks that must occur before declaring + /// listener healthy.

+ #[serde(rename = "healthyThreshold")] + pub healthy_threshold: i64, + ///

The time period in milliseconds between each health check execution.

+ #[serde(rename = "intervalMillis")] + pub interval_millis: i64, + ///

The destination path for the health check request. This is required only if the + /// specified protocol is HTTP. If the protocol is TCP, this parameter is ignored.

+ #[serde(rename = "path")] + #[serde(skip_serializing_if = "Option::is_none")] + pub path: Option, + ///

The destination port for the health check request. This port must match the port defined + /// in the PortMapping for the listener.

+ #[serde(rename = "port")] + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + ///

The protocol for the health check request.

+ #[serde(rename = "protocol")] + pub protocol: String, + ///

The amount of time to wait when receiving a response from the health check, in + /// milliseconds.

+ #[serde(rename = "timeoutMillis")] + pub timeout_millis: i64, + ///

The number of consecutive failed health checks that must occur before declaring a + /// virtual node unhealthy.

+ #[serde(rename = "unhealthyThreshold")] + pub unhealthy_threshold: i64, +} + +///

An object that represents a retry policy.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HttpRetryPolicy { + ///

Specify at least one of the following values.

+ /// + ///
     <ul>
+    /// <li>
+    /// <p>
+    /// <b>server-error</b> – HTTP status codes 500, 501,
+    /// 502, 503, 504, 505, 506, 507, 508, 510, and 511</p>
+    /// </li>
+    /// <li>
+    /// <p>
+    /// <b>gateway-error</b> – HTTP status codes 502,
+    /// 503, and 504</p>
+    /// </li>
+    /// <li>
+    /// <p>
+    /// <b>client-error</b> – HTTP status code 409</p>
+    /// </li>
+    /// <li>
+    /// <p>
+    /// <b>stream-error</b> – Retry on refused
+    /// stream</p>
+    /// </li>
+    /// </ul>
+    /// 
+ #[serde(rename = "httpRetryEvents")] + #[serde(skip_serializing_if = "Option::is_none")] + pub http_retry_events: Option>, + ///

The maximum number of retry attempts. If no value is specified, the default is 1.

+ #[serde(rename = "maxRetries")] + pub max_retries: i64, + ///

An object that represents the retry duration.

+ #[serde(rename = "perRetryTimeout")] + pub per_retry_timeout: Duration, + ///

Specify a valid value.

+ #[serde(rename = "tcpRetryEvents")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tcp_retry_events: Option>, +} + +///

An object representing the HTTP routing specification for a route.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HttpRoute { + ///

The action to take if a match is determined.

+ #[serde(rename = "action")] + pub action: HttpRouteAction, + ///

The criteria for determining an HTTP request match.

+ #[serde(rename = "match")] + pub match_: HttpRouteMatch, + ///

An object that represents a retry policy.

+ #[serde(rename = "retryPolicy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub retry_policy: Option, +} + +///

An object representing the traffic distribution requirements for matched HTTP +/// requests.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HttpRouteAction { + ///

The targets that traffic is routed to when a request matches the route. You can specify + /// one or more targets and their relative weights to distribute traffic with.

+ #[serde(rename = "weightedTargets")] + pub weighted_targets: Vec, +} + +///

An object representing the HTTP header in the request.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HttpRouteHeader { + ///

Specify True to match the opposite of the HeaderMatchMethod method and value. The default value is False.

+ #[serde(rename = "invert")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invert: Option, + ///

The HeaderMatchMethod object.

+ #[serde(rename = "match")] + #[serde(skip_serializing_if = "Option::is_none")] + pub match_: Option, + ///

A name for the HTTP header in the client request that will be matched on.

+ #[serde(rename = "name")] + pub name: String, +} + +///

An object representing the requirements for a route to match HTTP requests for a virtual +/// router.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HttpRouteMatch { + ///

The client request headers to match on.

+ #[serde(rename = "headers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub headers: Option>, + ///

The client request header method to match on.

+ #[serde(rename = "method")] + #[serde(skip_serializing_if = "Option::is_none")] + pub method: Option, + ///

Specifies the path to match requests with. This parameter must always start with + /// /, which by itself matches all requests to the virtual service name. You + /// can also match for path-based routing of requests. For example, if your virtual service + /// name is my-service.local and you want the route to match requests to + /// my-service.local/metrics, your prefix should be + /// /metrics.

+ #[serde(rename = "prefix")] + pub prefix: String, + ///

The client request header scheme to match on.

+ #[serde(rename = "scheme")] + #[serde(skip_serializing_if = "Option::is_none")] + pub scheme: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListMeshesInput { + ///

The maximum number of results returned by ListMeshes in paginated output. + /// When you use this parameter, ListMeshes returns only limit + /// results in a single page along with a nextToken response element. You can see + /// the remaining results of the initial request by sending another ListMeshes + /// request with the returned nextToken value. This value can be between + /// 1 and 100. If you don't use this parameter, + /// ListMeshes returns up to 100 results and a + /// nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The nextToken value returned from a previous paginated + /// ListMeshes request where limit was used and the results + /// exceeded the value of that parameter. Pagination continues from the end of the previous + /// results that returned the nextToken value.

+ /// + ///
     <note>
+    /// <p>This token should be treated as an opaque identifier that is used only to
+    /// retrieve the next items in a list and not for other programmatic purposes.</p>
+    /// </note>
+    /// 
+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListMeshesOutput { + ///

The list of existing service meshes.

+ #[serde(rename = "meshes")] + pub meshes: Vec, + ///

The nextToken value to include in a future ListMeshes request. + /// When the results of a ListMeshes request exceed limit, you can + /// use this value to retrieve the next page of results. This value is null when + /// there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListRoutesInput { + ///

The maximum number of results returned by ListRoutes in paginated output. + /// When you use this parameter, ListRoutes returns only limit + /// results in a single page along with a nextToken response element. You can see + /// the remaining results of the initial request by sending another ListRoutes + /// request with the returned nextToken value. This value can be between + /// 1 and 100. If you don't use this parameter, + /// ListRoutes returns up to 100 results and a + /// nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The name of the service mesh to list routes in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The nextToken value returned from a previous paginated + /// ListRoutes request where limit was used and the results + /// exceeded the value of that parameter. Pagination continues from the end of the previous + /// results that returned the nextToken value.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The name of the virtual router to list routes in.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListRoutesOutput { + ///

The nextToken value to include in a future ListRoutes request. + /// When the results of a ListRoutes request exceed limit, you can + /// use this value to retrieve the next page of results. This value is null when + /// there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of existing routes for the specified service mesh and virtual router.

+ #[serde(rename = "routes")] + pub routes: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceInput { + ///

The maximum number of tag results returned by ListTagsForResource in + /// paginated output. When this parameter is used, ListTagsForResource returns + /// only limit results in a single page along with a nextToken + /// response element. You can see the remaining results of the initial request by sending + /// another ListTagsForResource request with the returned nextToken + /// value. This value can be between 1 and 100. If you don't use + /// this parameter, ListTagsForResource returns up to 100 + /// results and a nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The nextToken value returned from a previous paginated + /// ListTagsForResource request where limit was used and the + /// results exceeded the value of that parameter. Pagination continues from the end of the + /// previous results that returned the nextToken value.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The Amazon Resource Name (ARN) that identifies the resource to list the tags for.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceOutput { + ///

The nextToken value to include in a future ListTagsForResource + /// request. When the results of a ListTagsForResource request exceed + /// limit, you can use this value to retrieve the next page of results. This + /// value is null when there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The tags for the resource.

+ #[serde(rename = "tags")] + pub tags: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListVirtualNodesInput { + ///

The maximum number of results returned by ListVirtualNodes in paginated + /// output. When you use this parameter, ListVirtualNodes returns only + /// limit results in a single page along with a nextToken response + /// element. You can see the remaining results of the initial request by sending another + /// ListVirtualNodes request with the returned nextToken value. + /// This value can be between 1 and 100. If you don't use this + /// parameter, ListVirtualNodes returns up to 100 results and a + /// nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The name of the service mesh to list virtual nodes in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The nextToken value returned from a previous paginated + /// ListVirtualNodes request where limit was used and the results + /// exceeded the value of that parameter. Pagination continues from the end of the previous + /// results that returned the nextToken value.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListVirtualNodesOutput { + ///

The nextToken value to include in a future ListVirtualNodes + /// request. When the results of a ListVirtualNodes request exceed + /// limit, you can use this value to retrieve the next page of results. This + /// value is null when there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of existing virtual nodes for the specified service mesh.

+ #[serde(rename = "virtualNodes")] + pub virtual_nodes: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListVirtualRoutersInput { + ///

The maximum number of results returned by ListVirtualRouters in paginated + /// output. When you use this parameter, ListVirtualRouters returns only + /// limit results in a single page along with a nextToken response + /// element. You can see the remaining results of the initial request by sending another + /// ListVirtualRouters request with the returned nextToken value. + /// This value can be between 1 and 100. If you don't use this + /// parameter, ListVirtualRouters returns up to 100 results and + /// a nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The name of the service mesh to list virtual routers in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The nextToken value returned from a previous paginated + /// ListVirtualRouters request where limit was used and the + /// results exceeded the value of that parameter. Pagination continues from the end of the + /// previous results that returned the nextToken value.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListVirtualRoutersOutput { + ///

The nextToken value to include in a future ListVirtualRouters + /// request. When the results of a ListVirtualRouters request exceed + /// limit, you can use this value to retrieve the next page of results. This + /// value is null when there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of existing virtual routers for the specified service mesh.

+ #[serde(rename = "virtualRouters")] + pub virtual_routers: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListVirtualServicesInput { + ///

The maximum number of results returned by ListVirtualServices in paginated + /// output. When you use this parameter, ListVirtualServices returns only + /// limit results in a single page along with a nextToken response + /// element. You can see the remaining results of the initial request by sending another + /// ListVirtualServices request with the returned nextToken value. + /// This value can be between 1 and 100. If you don't use this + /// parameter, ListVirtualServices returns up to 100 results and + /// a nextToken value if applicable.

+ #[serde(rename = "limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The name of the service mesh to list virtual services in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The nextToken value returned from a previous paginated + /// ListVirtualServices request where limit was used and the + /// results exceeded the value of that parameter. Pagination continues from the end of the + /// previous results that returned the nextToken value.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListVirtualServicesOutput { + ///

The nextToken value to include in a future ListVirtualServices + /// request. When the results of a ListVirtualServices request exceed + /// limit, you can use this value to retrieve the next page of results. This + /// value is null when there are no more results to return.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of existing virtual services for the specified service mesh.

+ #[serde(rename = "virtualServices")] + pub virtual_services: Vec, +} + +///

An object representing a listener for a virtual node.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Listener { + ///

The health check information for the listener.

+ #[serde(rename = "healthCheck")] + #[serde(skip_serializing_if = "Option::is_none")] + pub health_check: Option, + ///

The port mapping information for the listener.

+ #[serde(rename = "portMapping")] + pub port_mapping: PortMapping, +} + +///

An object representing the logging information for a virtual node.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Logging { + ///

The access log configuration for a virtual node.

+ #[serde(rename = "accessLog")] + #[serde(skip_serializing_if = "Option::is_none")] + pub access_log: Option, +} + +///

The range of values to match on. The first character of the range is included in the range, though the last character is not. For example, if the range specified were 1-100, only values 1-99 would be matched.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MatchRange { + ///

The end of the range.

+ #[serde(rename = "end")] + pub end: i64, + ///

The start of the range.

+ #[serde(rename = "start")] + pub start: i64, +} + +///

An object representing a service mesh returned by a describe operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MeshData { + ///

The name of the service mesh.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The associated metadata for the service mesh.

+ #[serde(rename = "metadata")] + pub metadata: ResourceMetadata, + ///

The associated specification for the service mesh.

+ #[serde(rename = "spec")] + pub spec: MeshSpec, + ///

The status of the service mesh.

+ #[serde(rename = "status")] + pub status: MeshStatus, +} + +///

An object representing a service mesh returned by a list operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MeshRef { + ///

The full Amazon Resource Name (ARN) of the service mesh.

+ #[serde(rename = "arn")] + pub arn: String, + ///

The name of the service mesh.

+ #[serde(rename = "meshName")] + pub mesh_name: String, +} + +///

An object representing the specification of a service mesh.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MeshSpec { + ///

The egress filter rules for the service mesh.

+ #[serde(rename = "egressFilter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub egress_filter: Option, +} + +///

An object representing the status of a service mesh.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MeshStatus { + ///

The current mesh status.

+ #[serde(rename = "status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, +} + +///

An object representing a virtual node or virtual router listener port mapping.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PortMapping { + ///

The port used for the port mapping.

+ #[serde(rename = "port")] + pub port: i64, + ///

The protocol used for the port mapping.

+ #[serde(rename = "protocol")] + pub protocol: String, +} + +///

An object representing metadata for a resource.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ResourceMetadata { + ///

The full Amazon Resource Name (ARN) for the resource.

+ #[serde(rename = "arn")] + pub arn: String, + ///

The Unix epoch timestamp in seconds for when the resource was created.

+ #[serde(rename = "createdAt")] + pub created_at: f64, + ///

The Unix epoch timestamp in seconds for when the resource was last updated.

+ #[serde(rename = "lastUpdatedAt")] + pub last_updated_at: f64, + ///

The unique identifier for the resource.

+ #[serde(rename = "uid")] + pub uid: String, + ///

The version of the resource. Resources are created at version 1, and this version is + /// incremented each time that they're updated.

+ #[serde(rename = "version")] + pub version: i64, +} + +///

An object representing a route returned by a describe operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RouteData { + ///

The name of the service mesh that the route resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The associated metadata for the route.

+ #[serde(rename = "metadata")] + pub metadata: ResourceMetadata, + ///

The name of the route.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The specifications of the route.

+ #[serde(rename = "spec")] + pub spec: RouteSpec, + ///

The status of the route.

+ #[serde(rename = "status")] + pub status: RouteStatus, + ///

The virtual router that the route is associated with.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +///

An object representing a route returned by a list operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RouteRef { + ///

The full Amazon Resource Name (ARN) for the route.

+ #[serde(rename = "arn")] + pub arn: String, + ///

The name of the service mesh that the route resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the route.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The virtual router that the route is associated with.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +///

An object representing the specification of a route.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RouteSpec { + ///

The HTTP routing information for the route.

+ #[serde(rename = "httpRoute")] + #[serde(skip_serializing_if = "Option::is_none")] + pub http_route: Option, + ///

The priority for the route. Routes are matched based on the specified value, where 0 is the highest priority.

+ #[serde(rename = "priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, + ///

The TCP routing information for the route.

+ #[serde(rename = "tcpRoute")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tcp_route: Option, +} + +///

An object representing the current status of a route.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RouteStatus { + ///

The current status for the route.

+ #[serde(rename = "status")] + pub status: String, +} + +///

An object representing the service discovery information for a virtual node.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ServiceDiscovery { + ///

Specifies any AWS Cloud Map information for the virtual node.

+ #[serde(rename = "awsCloudMap")] + #[serde(skip_serializing_if = "Option::is_none")] + pub aws_cloud_map: Option, + ///

Specifies the DNS information for the virtual node.

+ #[serde(rename = "dns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dns: Option, +} + +///

Optional metadata that you apply to a resource to assist with categorization and +/// organization. Each tag consists of a key and an optional value, both of which you define. +/// Tag keys can have a maximum character length of 128 characters, and tag values can have +/// a maximum length of 256 characters.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TagRef { + ///

One part of a key-value pair that make up a tag. A key is a general label + /// that acts like a category for more specific tag values.

+ #[serde(rename = "key")] + pub key: String, + ///

The optional part of a key-value pair that make up a tag. A value acts as a + /// descriptor within a tag category (key).

+ #[serde(rename = "value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceInput { + ///

The Amazon Resource Name (ARN) of the resource to add tags to.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, + ///

The tags to add to the resource. A tag is an array of key-value pairs. + /// Tag keys can have a maximum character length of 128 characters, and tag values can have + /// a maximum length of 256 characters.

+ #[serde(rename = "tags")] + pub tags: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceOutput {} + +///

An object representing the TCP routing specification for a route.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TcpRoute { + ///

The action to take if a match is determined.

+ #[serde(rename = "action")] + pub action: TcpRouteAction, +} + +///

An object representing the traffic distribution requirements for matched TCP +/// requests.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TcpRouteAction { + ///

The targets that traffic is routed to when a request matches the route. You can specify + /// one or more targets and their relative weights to distribute traffic with.

+ #[serde(rename = "weightedTargets")] + pub weighted_targets: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceInput { + ///

The Amazon Resource Name (ARN) of the resource to delete tags from.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, + ///

The keys of the tags to be removed.

+ #[serde(rename = "tagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceOutput {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateMeshInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh to update.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The service mesh specification to apply.

+ #[serde(rename = "spec")] + #[serde(skip_serializing_if = "Option::is_none")] + pub spec: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateMeshOutput { + #[serde(rename = "mesh")] + pub mesh: MeshData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateRouteInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh that the route resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the route to update.

+ #[serde(rename = "routeName")] + pub route_name: String, + ///

The new route specification to apply. This overwrites the existing data.

+ #[serde(rename = "spec")] + pub spec: RouteSpec, + ///

The name of the virtual router that the route is associated with.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateRouteOutput { + ///

A full description of the route that was updated.

+ #[serde(rename = "route")] + pub route: RouteData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateVirtualNodeInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh that the virtual node resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The new virtual node specification to apply. This overwrites the existing data.

+ #[serde(rename = "spec")] + pub spec: VirtualNodeSpec, + ///

The name of the virtual node to update.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateVirtualNodeOutput { + ///

A full description of the virtual node that was updated.

+ #[serde(rename = "virtualNode")] + pub virtual_node: VirtualNodeData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateVirtualRouterInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh that the virtual router resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The new virtual router specification to apply. This overwrites the existing data.

+ #[serde(rename = "spec")] + pub spec: VirtualRouterSpec, + ///

The name of the virtual router to update.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateVirtualRouterOutput { + ///

A full description of the virtual router that was updated.

+ #[serde(rename = "virtualRouter")] + pub virtual_router: VirtualRouterData, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateVirtualServiceInput { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the + /// request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

+ #[serde(rename = "clientToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_token: Option, + ///

The name of the service mesh that the virtual service resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The new virtual service specification to apply. This overwrites the existing + /// data.

+ #[serde(rename = "spec")] + pub spec: VirtualServiceSpec, + ///

The name of the virtual service to update.

+ #[serde(rename = "virtualServiceName")] + pub virtual_service_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateVirtualServiceOutput { + ///

A full description of the virtual service that was updated.

+ #[serde(rename = "virtualService")] + pub virtual_service: VirtualServiceData, +} + +///

An object representing a virtual node returned by a describe operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct VirtualNodeData { + ///

The name of the service mesh that the virtual node resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The associated metadata for the virtual node.

+ #[serde(rename = "metadata")] + pub metadata: ResourceMetadata, + ///

The specifications of the virtual node.

+ #[serde(rename = "spec")] + pub spec: VirtualNodeSpec, + ///

The current status for the virtual node.

+ #[serde(rename = "status")] + pub status: VirtualNodeStatus, + ///

The name of the virtual node.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +///

An object representing a virtual node returned by a list operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct VirtualNodeRef { + ///

The full Amazon Resource Name (ARN) for the virtual node.

+ #[serde(rename = "arn")] + pub arn: String, + ///

The name of the service mesh that the virtual node resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The name of the virtual node.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +///

An object representing a virtual node service provider.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VirtualNodeServiceProvider { + ///

The name of the virtual node that is acting as a service provider.

+ #[serde(rename = "virtualNodeName")] + pub virtual_node_name: String, +} + +///

An object representing the specification of a virtual node.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VirtualNodeSpec { + ///

The backends that the virtual node is expected to send outbound traffic to.

+ #[serde(rename = "backends")] + #[serde(skip_serializing_if = "Option::is_none")] + pub backends: Option>, + ///

The listeners that the virtual node is expected to receive inbound traffic from. + /// Currently only one listener is supported per virtual node.

+ #[serde(rename = "listeners")] + #[serde(skip_serializing_if = "Option::is_none")] + pub listeners: Option>, + ///

The inbound and outbound access logging information for the virtual node.

+ #[serde(rename = "logging")] + #[serde(skip_serializing_if = "Option::is_none")] + pub logging: Option, + ///

The service discovery information for the virtual node. If your virtual node does not + /// expect ingress traffic, you can omit this parameter.

+ #[serde(rename = "serviceDiscovery")] + #[serde(skip_serializing_if = "Option::is_none")] + pub service_discovery: Option, +} + +///

An object representing the current status of the virtual node.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct VirtualNodeStatus { + ///

The current status of the virtual node.

+ #[serde(rename = "status")] + pub status: String, +} + +///

An object representing a virtual router returned by a describe operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct VirtualRouterData { + ///

The name of the service mesh that the virtual router resides in.

+ #[serde(rename = "meshName")] + pub mesh_name: String, + ///

The associated metadata for the virtual router.

+ #[serde(rename = "metadata")] + pub metadata: ResourceMetadata, + ///

The specifications of the virtual router.

+ #[serde(rename = "spec")] + pub spec: VirtualRouterSpec, + ///

The current status of the virtual router.

+ #[serde(rename = "status")] + pub status: VirtualRouterStatus, + ///

The name of the virtual router.

+ #[serde(rename = "virtualRouterName")] + pub virtual_router_name: String, +} + +///

An object representing a virtual router listener.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VirtualRouterListener { + #[serde(rename = "portMapping")] + pub port_mapping: PortMapping, +} + +///
+/// An object representing a virtual router returned by a list operation.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct VirtualRouterRef {
+    /// The full Amazon Resource Name (ARN) for the virtual router.
+    #[serde(rename = "arn")]
+    pub arn: String,
+    /// The name of the service mesh that the virtual router resides in.
+    #[serde(rename = "meshName")]
+    pub mesh_name: String,
+    /// The name of the virtual router.
+    #[serde(rename = "virtualRouterName")]
+    pub virtual_router_name: String,
+}
+
+/// An object representing a virtual router service provider.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VirtualRouterServiceProvider {
+    /// The name of the virtual router that is acting as a service provider.
+    #[serde(rename = "virtualRouterName")]
+    pub virtual_router_name: String,
+}
+
+/// An object representing the specification of a virtual router.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VirtualRouterSpec {
+    /// The listeners that the virtual router is expected to receive inbound traffic from.
+    /// Currently only one listener is supported per virtual router.
+    #[serde(rename = "listeners")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub listeners: Option<Vec<VirtualRouterListener>>,
+}
+
+/// An object representing the status of a virtual router.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct VirtualRouterStatus {
+    /// The current status of the virtual router.
+    #[serde(rename = "status")]
+    pub status: String,
+}
+
+/// An object representing a virtual service backend for a virtual node.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VirtualServiceBackend {
+    /// The name of the virtual service that is acting as a virtual node backend.
+    #[serde(rename = "virtualServiceName")]
+    pub virtual_service_name: String,
+}
+
+/// An object representing a virtual service returned by a describe operation.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct VirtualServiceData {
+    /// The name of the service mesh that the virtual service resides in.
+    #[serde(rename = "meshName")]
+    pub mesh_name: String,
+    #[serde(rename = "metadata")]
+    pub metadata: ResourceMetadata,
+    /// The specifications of the virtual service.
+    #[serde(rename = "spec")]
+    pub spec: VirtualServiceSpec,
+    /// The current status of the virtual service.
+    #[serde(rename = "status")]
+    pub status: VirtualServiceStatus,
+    /// The name of the virtual service.
+    #[serde(rename = "virtualServiceName")]
+    pub virtual_service_name: String,
+}
+
+/// An object representing the provider for a virtual service.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VirtualServiceProvider {
+    /// The virtual node associated with a virtual service.
+    #[serde(rename = "virtualNode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub virtual_node: Option<VirtualNodeServiceProvider>,
+    /// The virtual router associated with a virtual service.
+    #[serde(rename = "virtualRouter")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub virtual_router: Option<VirtualRouterServiceProvider>,
+}
+
+/// An object representing a virtual service returned by a list operation.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct VirtualServiceRef {
+    /// The full Amazon Resource Name (ARN) for the virtual service.
+    #[serde(rename = "arn")]
+    pub arn: String,
+    /// The name of the service mesh that the virtual service resides in.
+    #[serde(rename = "meshName")]
+    pub mesh_name: String,
+    /// The name of the virtual service.
+    #[serde(rename = "virtualServiceName")]
+    pub virtual_service_name: String,
+}
+
+/// An object representing the specification of a virtual service.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VirtualServiceSpec {
+    /// The App Mesh object that is acting as the provider for a virtual service. You can
+    /// specify a single virtual node or virtual router.
+    #[serde(rename = "provider")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub provider: Option<VirtualServiceProvider>,
+}
+
+/// An object representing the status of a virtual service.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct VirtualServiceStatus {
+    /// The current status of the virtual service.
+    #[serde(rename = "status")]
+    pub status: String,
+}
+
+/// An object representing a target and its relative weight. Traffic is distributed
+/// across targets according to their relative weight. For example, a weighted target
+/// with a relative weight of 50 receives five times as much traffic as one with a
+/// relative weight of 10.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct WeightedTarget {
+    /// The virtual node to associate with the weighted target.
+    #[serde(rename = "virtualNode")]
+    pub virtual_node: String,
+    /// The relative weight of the weighted target.
+    #[serde(rename = "weight")]
+    pub weight: i64,
+}
+
+/// Errors returned by CreateMesh
+#[derive(Debug, PartialEq)]
+pub enum CreateMeshError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.
+    Conflict(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.
+    LimitExceeded(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl CreateMeshError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateMeshError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(CreateMeshError::BadRequest(err.msg)),
+                "ConflictException" => return RusotoError::Service(CreateMeshError::Conflict(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(CreateMeshError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(CreateMeshError::InternalServerError(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateMeshError::LimitExceeded(err.msg)),
+                "NotFoundException" => return RusotoError::Service(CreateMeshError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(CreateMeshError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(CreateMeshError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateMeshError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateMeshError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateMeshError::BadRequest(ref cause) => cause,
+            CreateMeshError::Conflict(ref cause) => cause,
+            CreateMeshError::Forbidden(ref cause) => cause,
+            CreateMeshError::InternalServerError(ref cause) => cause,
+            CreateMeshError::LimitExceeded(ref cause) => cause,
+            CreateMeshError::NotFound(ref cause) => cause,
+            CreateMeshError::ServiceUnavailable(ref cause) => cause,
+            CreateMeshError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
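// Editor's note: an illustrative sketch, not part of the generated diff. It shows
// how a caller might branch on the service error variants above; the
// `RusotoError<CreateMeshError>` value is assumed to come from a `create_mesh`
// call made through this crate's client.
fn explain_create_mesh_error(err: RusotoError<CreateMeshError>) {
    match err {
        RusotoError::Service(CreateMeshError::Conflict(msg)) => {
            // A previous call used the same client token with a different spec.
            eprintln!("conflicting client token: {}", msg);
        }
        RusotoError::Service(CreateMeshError::TooManyRequests(msg)) => {
            // Throttled: back off before retrying, as the docs above advise.
            eprintln!("throttled: {}", msg);
        }
        RusotoError::Validation(msg) => eprintln!("invalid request: {}", msg),
        other => eprintln!("create_mesh failed: {:?}", other),
    }
}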
+/// Errors returned by CreateRoute
+#[derive(Debug, PartialEq)]
+pub enum CreateRouteError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.
+    Conflict(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.
+    LimitExceeded(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl CreateRouteError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateRouteError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(CreateRouteError::BadRequest(err.msg)),
+                "ConflictException" => return RusotoError::Service(CreateRouteError::Conflict(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(CreateRouteError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(CreateRouteError::InternalServerError(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateRouteError::LimitExceeded(err.msg)),
+                "NotFoundException" => return RusotoError::Service(CreateRouteError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(CreateRouteError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(CreateRouteError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateRouteError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateRouteError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateRouteError::BadRequest(ref cause) => cause,
+            CreateRouteError::Conflict(ref cause) => cause,
+            CreateRouteError::Forbidden(ref cause) => cause,
+            CreateRouteError::InternalServerError(ref cause) => cause,
+            CreateRouteError::LimitExceeded(ref cause) => cause,
+            CreateRouteError::NotFound(ref cause) => cause,
+            CreateRouteError::ServiceUnavailable(ref cause) => cause,
+            CreateRouteError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by CreateVirtualNode
+#[derive(Debug, PartialEq)]
+pub enum CreateVirtualNodeError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.
+    Conflict(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.
+    LimitExceeded(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl CreateVirtualNodeError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateVirtualNodeError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(CreateVirtualNodeError::BadRequest(err.msg)),
+                "ConflictException" => return RusotoError::Service(CreateVirtualNodeError::Conflict(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(CreateVirtualNodeError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(CreateVirtualNodeError::InternalServerError(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateVirtualNodeError::LimitExceeded(err.msg)),
+                "NotFoundException" => return RusotoError::Service(CreateVirtualNodeError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(CreateVirtualNodeError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(CreateVirtualNodeError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateVirtualNodeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateVirtualNodeError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateVirtualNodeError::BadRequest(ref cause) => cause,
+            CreateVirtualNodeError::Conflict(ref cause) => cause,
+            CreateVirtualNodeError::Forbidden(ref cause) => cause,
+            CreateVirtualNodeError::InternalServerError(ref cause) => cause,
+            CreateVirtualNodeError::LimitExceeded(ref cause) => cause,
+            CreateVirtualNodeError::NotFound(ref cause) => cause,
+            CreateVirtualNodeError::ServiceUnavailable(ref cause) => cause,
+            CreateVirtualNodeError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by CreateVirtualRouter
+#[derive(Debug, PartialEq)]
+pub enum CreateVirtualRouterError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.
+    Conflict(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.
+    LimitExceeded(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl CreateVirtualRouterError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateVirtualRouterError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(CreateVirtualRouterError::BadRequest(err.msg)),
+                "ConflictException" => return RusotoError::Service(CreateVirtualRouterError::Conflict(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(CreateVirtualRouterError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(CreateVirtualRouterError::InternalServerError(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateVirtualRouterError::LimitExceeded(err.msg)),
+                "NotFoundException" => return RusotoError::Service(CreateVirtualRouterError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(CreateVirtualRouterError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(CreateVirtualRouterError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateVirtualRouterError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateVirtualRouterError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateVirtualRouterError::BadRequest(ref cause) => cause,
+            CreateVirtualRouterError::Conflict(ref cause) => cause,
+            CreateVirtualRouterError::Forbidden(ref cause) => cause,
+            CreateVirtualRouterError::InternalServerError(ref cause) => cause,
+            CreateVirtualRouterError::LimitExceeded(ref cause) => cause,
+            CreateVirtualRouterError::NotFound(ref cause) => cause,
+            CreateVirtualRouterError::ServiceUnavailable(ref cause) => cause,
+            CreateVirtualRouterError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by CreateVirtualService
+#[derive(Debug, PartialEq)]
+pub enum CreateVirtualServiceError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.
+    Conflict(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.
+    LimitExceeded(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl CreateVirtualServiceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateVirtualServiceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(CreateVirtualServiceError::BadRequest(err.msg)),
+                "ConflictException" => return RusotoError::Service(CreateVirtualServiceError::Conflict(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(CreateVirtualServiceError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(CreateVirtualServiceError::InternalServerError(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateVirtualServiceError::LimitExceeded(err.msg)),
+                "NotFoundException" => return RusotoError::Service(CreateVirtualServiceError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(CreateVirtualServiceError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(CreateVirtualServiceError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateVirtualServiceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateVirtualServiceError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateVirtualServiceError::BadRequest(ref cause) => cause,
+            CreateVirtualServiceError::Conflict(ref cause) => cause,
+            CreateVirtualServiceError::Forbidden(ref cause) => cause,
+            CreateVirtualServiceError::InternalServerError(ref cause) => cause,
+            CreateVirtualServiceError::LimitExceeded(ref cause) => cause,
+            CreateVirtualServiceError::NotFound(ref cause) => cause,
+            CreateVirtualServiceError::ServiceUnavailable(ref cause) => cause,
+            CreateVirtualServiceError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteMesh
+#[derive(Debug, PartialEq)]
+pub enum DeleteMeshError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// You can't delete the specified resource because it's in use or required by another resource.
+    ResourceInUse(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DeleteMeshError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteMeshError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DeleteMeshError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DeleteMeshError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DeleteMeshError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DeleteMeshError::NotFound(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(DeleteMeshError::ResourceInUse(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DeleteMeshError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DeleteMeshError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteMeshError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteMeshError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteMeshError::BadRequest(ref cause) => cause,
+            DeleteMeshError::Forbidden(ref cause) => cause,
+            DeleteMeshError::InternalServerError(ref cause) => cause,
+            DeleteMeshError::NotFound(ref cause) => cause,
+            DeleteMeshError::ResourceInUse(ref cause) => cause,
+            DeleteMeshError::ServiceUnavailable(ref cause) => cause,
+            DeleteMeshError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteRoute
+#[derive(Debug, PartialEq)]
+pub enum DeleteRouteError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// You can't delete the specified resource because it's in use or required by another resource.
+    ResourceInUse(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DeleteRouteError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteRouteError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DeleteRouteError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DeleteRouteError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DeleteRouteError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DeleteRouteError::NotFound(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(DeleteRouteError::ResourceInUse(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DeleteRouteError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DeleteRouteError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteRouteError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteRouteError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteRouteError::BadRequest(ref cause) => cause,
+            DeleteRouteError::Forbidden(ref cause) => cause,
+            DeleteRouteError::InternalServerError(ref cause) => cause,
+            DeleteRouteError::NotFound(ref cause) => cause,
+            DeleteRouteError::ResourceInUse(ref cause) => cause,
+            DeleteRouteError::ServiceUnavailable(ref cause) => cause,
+            DeleteRouteError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteVirtualNode
+#[derive(Debug, PartialEq)]
+pub enum DeleteVirtualNodeError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// You can't delete the specified resource because it's in use or required by another resource.
+    ResourceInUse(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DeleteVirtualNodeError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteVirtualNodeError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DeleteVirtualNodeError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DeleteVirtualNodeError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DeleteVirtualNodeError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DeleteVirtualNodeError::NotFound(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(DeleteVirtualNodeError::ResourceInUse(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DeleteVirtualNodeError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DeleteVirtualNodeError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteVirtualNodeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteVirtualNodeError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteVirtualNodeError::BadRequest(ref cause) => cause,
+            DeleteVirtualNodeError::Forbidden(ref cause) => cause,
+            DeleteVirtualNodeError::InternalServerError(ref cause) => cause,
+            DeleteVirtualNodeError::NotFound(ref cause) => cause,
+            DeleteVirtualNodeError::ResourceInUse(ref cause) => cause,
+            DeleteVirtualNodeError::ServiceUnavailable(ref cause) => cause,
+            DeleteVirtualNodeError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteVirtualRouter
+#[derive(Debug, PartialEq)]
+pub enum DeleteVirtualRouterError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// You can't delete the specified resource because it's in use or required by another resource.
+    ResourceInUse(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DeleteVirtualRouterError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteVirtualRouterError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DeleteVirtualRouterError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DeleteVirtualRouterError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DeleteVirtualRouterError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DeleteVirtualRouterError::NotFound(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(DeleteVirtualRouterError::ResourceInUse(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DeleteVirtualRouterError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DeleteVirtualRouterError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteVirtualRouterError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteVirtualRouterError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteVirtualRouterError::BadRequest(ref cause) => cause,
+            DeleteVirtualRouterError::Forbidden(ref cause) => cause,
+            DeleteVirtualRouterError::InternalServerError(ref cause) => cause,
+            DeleteVirtualRouterError::NotFound(ref cause) => cause,
+            DeleteVirtualRouterError::ResourceInUse(ref cause) => cause,
+            DeleteVirtualRouterError::ServiceUnavailable(ref cause) => cause,
+            DeleteVirtualRouterError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteVirtualService
+#[derive(Debug, PartialEq)]
+pub enum DeleteVirtualServiceError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DeleteVirtualServiceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteVirtualServiceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DeleteVirtualServiceError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DeleteVirtualServiceError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DeleteVirtualServiceError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DeleteVirtualServiceError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DeleteVirtualServiceError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DeleteVirtualServiceError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteVirtualServiceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteVirtualServiceError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteVirtualServiceError::BadRequest(ref cause) => cause,
+            DeleteVirtualServiceError::Forbidden(ref cause) => cause,
+            DeleteVirtualServiceError::InternalServerError(ref cause) => cause,
+            DeleteVirtualServiceError::NotFound(ref cause) => cause,
+            DeleteVirtualServiceError::ServiceUnavailable(ref cause) => cause,
+            DeleteVirtualServiceError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeMesh
+#[derive(Debug, PartialEq)]
+pub enum DescribeMeshError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DescribeMeshError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeMeshError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DescribeMeshError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DescribeMeshError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DescribeMeshError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DescribeMeshError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DescribeMeshError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DescribeMeshError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeMeshError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeMeshError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeMeshError::BadRequest(ref cause) => cause,
+            DescribeMeshError::Forbidden(ref cause) => cause,
+            DescribeMeshError::InternalServerError(ref cause) => cause,
+            DescribeMeshError::NotFound(ref cause) => cause,
+            DescribeMeshError::ServiceUnavailable(ref cause) => cause,
+            DescribeMeshError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeRoute
+#[derive(Debug, PartialEq)]
+pub enum DescribeRouteError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DescribeRouteError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeRouteError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DescribeRouteError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DescribeRouteError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DescribeRouteError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DescribeRouteError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DescribeRouteError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DescribeRouteError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeRouteError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeRouteError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeRouteError::BadRequest(ref cause) => cause,
+            DescribeRouteError::Forbidden(ref cause) => cause,
+            DescribeRouteError::InternalServerError(ref cause) => cause,
+            DescribeRouteError::NotFound(ref cause) => cause,
+            DescribeRouteError::ServiceUnavailable(ref cause) => cause,
+            DescribeRouteError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeVirtualNode
+#[derive(Debug, PartialEq)]
+pub enum DescribeVirtualNodeError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DescribeVirtualNodeError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeVirtualNodeError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DescribeVirtualNodeError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DescribeVirtualNodeError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DescribeVirtualNodeError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DescribeVirtualNodeError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DescribeVirtualNodeError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DescribeVirtualNodeError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeVirtualNodeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeVirtualNodeError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeVirtualNodeError::BadRequest(ref cause) => cause,
+            DescribeVirtualNodeError::Forbidden(ref cause) => cause,
+            DescribeVirtualNodeError::InternalServerError(ref cause) => cause,
+            DescribeVirtualNodeError::NotFound(ref cause) => cause,
+            DescribeVirtualNodeError::ServiceUnavailable(ref cause) => cause,
+            DescribeVirtualNodeError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeVirtualRouter
+#[derive(Debug, PartialEq)]
+pub enum DescribeVirtualRouterError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DescribeVirtualRouterError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeVirtualRouterError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DescribeVirtualRouterError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DescribeVirtualRouterError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DescribeVirtualRouterError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DescribeVirtualRouterError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DescribeVirtualRouterError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DescribeVirtualRouterError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeVirtualRouterError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeVirtualRouterError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeVirtualRouterError::BadRequest(ref cause) => cause,
+            DescribeVirtualRouterError::Forbidden(ref cause) => cause,
+            DescribeVirtualRouterError::InternalServerError(ref cause) => cause,
+            DescribeVirtualRouterError::NotFound(ref cause) => cause,
+            DescribeVirtualRouterError::ServiceUnavailable(ref cause) => cause,
+            DescribeVirtualRouterError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeVirtualService
+#[derive(Debug, PartialEq)]
+pub enum DescribeVirtualServiceError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl DescribeVirtualServiceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeVirtualServiceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(DescribeVirtualServiceError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(DescribeVirtualServiceError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(DescribeVirtualServiceError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(DescribeVirtualServiceError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(DescribeVirtualServiceError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(DescribeVirtualServiceError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeVirtualServiceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeVirtualServiceError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeVirtualServiceError::BadRequest(ref cause) => cause,
+            DescribeVirtualServiceError::Forbidden(ref cause) => cause,
+            DescribeVirtualServiceError::InternalServerError(ref cause) => cause,
+            DescribeVirtualServiceError::NotFound(ref cause) => cause,
+            DescribeVirtualServiceError::ServiceUnavailable(ref cause) => cause,
+            DescribeVirtualServiceError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListMeshes
+#[derive(Debug, PartialEq)]
+pub enum ListMeshesError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl ListMeshesError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListMeshesError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(ListMeshesError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(ListMeshesError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(ListMeshesError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(ListMeshesError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(ListMeshesError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(ListMeshesError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListMeshesError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListMeshesError {
+    fn description(&self) -> &str {
+        match *self {
+            ListMeshesError::BadRequest(ref cause) => cause,
+            ListMeshesError::Forbidden(ref cause) => cause,
+            ListMeshesError::InternalServerError(ref cause) => cause,
+            ListMeshesError::NotFound(ref cause) => cause,
+            ListMeshesError::ServiceUnavailable(ref cause) => cause,
+            ListMeshesError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
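// Editor's note: an illustrative sketch, not part of the generated diff. The
// TooManyRequests docs above recommend an increasing sleep interval between
// retries; this shows one simple exponential backoff over a caller-supplied
// closure that performs the list call. The helper is a hypothetical addition,
// not an API of this crate.
use std::{thread, time::Duration};

fn retry_with_backoff<T, F>(
    mut call: F,
    max_attempts: u32,
) -> Result<T, RusotoError<ListMeshesError>>
where
    F: FnMut() -> Result<T, RusotoError<ListMeshesError>>,
{
    let mut delay = Duration::from_millis(100);
    for _ in 1..max_attempts {
        match call() {
            Err(RusotoError::Service(ListMeshesError::TooManyRequests(_))) => {
                // Increasing sleep interval, as the generated docs suggest.
                thread::sleep(delay);
                delay *= 2;
            }
            other => return other,
        }
    }
    call()
}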
+/// Errors returned by ListRoutes
+#[derive(Debug, PartialEq)]
+pub enum ListRoutesError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl ListRoutesError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListRoutesError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(ListRoutesError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(ListRoutesError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(ListRoutesError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(ListRoutesError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(ListRoutesError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(ListRoutesError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListRoutesError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListRoutesError {
+    fn description(&self) -> &str {
+        match *self {
+            ListRoutesError::BadRequest(ref cause) => cause,
+            ListRoutesError::Forbidden(ref cause) => cause,
+            ListRoutesError::InternalServerError(ref cause) => cause,
+            ListRoutesError::NotFound(ref cause) => cause,
+            ListRoutesError::ServiceUnavailable(ref cause) => cause,
+            ListRoutesError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListTagsForResource
+#[derive(Debug, PartialEq)]
+pub enum ListTagsForResourceError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl ListTagsForResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(ListTagsForResourceError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(ListTagsForResourceError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(ListTagsForResourceError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(ListTagsForResourceError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(ListTagsForResourceError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListTagsForResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListTagsForResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            ListTagsForResourceError::BadRequest(ref cause) => cause,
+            ListTagsForResourceError::Forbidden(ref cause) => cause,
+            ListTagsForResourceError::InternalServerError(ref cause) => cause,
+            ListTagsForResourceError::NotFound(ref cause) => cause,
+            ListTagsForResourceError::ServiceUnavailable(ref cause) => cause,
+            ListTagsForResourceError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListVirtualNodes
+#[derive(Debug, PartialEq)]
+pub enum ListVirtualNodesError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl ListVirtualNodesError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListVirtualNodesError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(ListVirtualNodesError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(ListVirtualNodesError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(ListVirtualNodesError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(ListVirtualNodesError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(ListVirtualNodesError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(ListVirtualNodesError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListVirtualNodesError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListVirtualNodesError {
+    fn description(&self) -> &str {
+        match *self {
+            ListVirtualNodesError::BadRequest(ref cause) => cause,
+            ListVirtualNodesError::Forbidden(ref cause) => cause,
+            ListVirtualNodesError::InternalServerError(ref cause) => cause,
+            ListVirtualNodesError::NotFound(ref cause) => cause,
+            ListVirtualNodesError::ServiceUnavailable(ref cause) => cause,
+            ListVirtualNodesError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListVirtualRouters
+#[derive(Debug, PartialEq)]
+pub enum ListVirtualRoutersError {
+    /// The request syntax was malformed. Check your request syntax and try again.
+    BadRequest(String),
+    /// You don't have permissions to perform this action.
+    Forbidden(String),
+    /// The request processing has failed because of an unknown error, exception, or failure.
+    InternalServerError(String),
+    /// The specified resource doesn't exist. Check your request syntax and try again.
+    NotFound(String),
+    /// The request has failed due to a temporary failure of the service.
+    ServiceUnavailable(String),
+    /// The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.
+    TooManyRequests(String),
+}
+
+impl ListVirtualRoutersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListVirtualRoutersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => return RusotoError::Service(ListVirtualRoutersError::BadRequest(err.msg)),
+                "ForbiddenException" => return RusotoError::Service(ListVirtualRoutersError::Forbidden(err.msg)),
+                "InternalServerErrorException" => return RusotoError::Service(ListVirtualRoutersError::InternalServerError(err.msg)),
+                "NotFoundException" => return RusotoError::Service(ListVirtualRoutersError::NotFound(err.msg)),
+                "ServiceUnavailableException" => return RusotoError::Service(ListVirtualRoutersError::ServiceUnavailable(err.msg)),
+                "TooManyRequestsException" => return RusotoError::Service(ListVirtualRoutersError::TooManyRequests(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListVirtualRoutersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListVirtualRoutersError {
+    fn description(&self) -> &str {
+        match *self {
+            ListVirtualRoutersError::BadRequest(ref cause) => cause,
+            ListVirtualRoutersError::Forbidden(ref cause) => cause,
+            ListVirtualRoutersError::InternalServerError(ref cause) => cause,
+            ListVirtualRoutersError::NotFound(ref cause) => cause,
+            ListVirtualRoutersError::ServiceUnavailable(ref cause) => cause,
+            ListVirtualRoutersError::TooManyRequests(ref cause) => cause,
+        }
+    }
+}

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl ListVirtualServicesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(ListVirtualServicesError::BadRequest(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(ListVirtualServicesError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(ListVirtualServicesError::InternalServerError( + err.msg, + )) + } + "NotFoundException" => { + return RusotoError::Service(ListVirtualServicesError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(ListVirtualServicesError::ServiceUnavailable( + err.msg, + )) + } + "TooManyRequestsException" => { + return RusotoError::Service(ListVirtualServicesError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListVirtualServicesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListVirtualServicesError { + fn description(&self) -> &str { + match *self { + ListVirtualServicesError::BadRequest(ref cause) => cause, + ListVirtualServicesError::Forbidden(ref cause) => cause, + ListVirtualServicesError::InternalServerError(ref cause) => cause, + ListVirtualServicesError::NotFound(ref cause) => cause, + ListVirtualServicesError::ServiceUnavailable(ref cause) => cause, + ListVirtualServicesError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), + ///

The request exceeds the maximum allowed number of tags allowed per resource. The current + /// limit is 50 user tags per resource. You must reduce the number of tags in the request. None + /// of the tags in this request were applied.

+ TooManyTags(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(TagResourceError::BadRequest(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(TagResourceError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(TagResourceError::InternalServerError(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(TagResourceError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(TagResourceError::ServiceUnavailable(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(TagResourceError::TooManyRequests(err.msg)) + } + "TooManyTagsException" => { + return RusotoError::Service(TagResourceError::TooManyTags(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::BadRequest(ref cause) => cause, + TagResourceError::Forbidden(ref cause) => cause, + TagResourceError::InternalServerError(ref cause) => cause, + TagResourceError::NotFound(ref cause) => cause, + TagResourceError::ServiceUnavailable(ref cause) => cause, + TagResourceError::TooManyRequests(ref cause) => cause, + TagResourceError::TooManyTags(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UntagResourceError::BadRequest(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UntagResourceError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UntagResourceError::InternalServerError(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UntagResourceError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UntagResourceError::ServiceUnavailable(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(UntagResourceError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::BadRequest(ref cause) => cause, + UntagResourceError::Forbidden(ref cause) => cause, + UntagResourceError::InternalServerError(ref cause) => cause, + UntagResourceError::NotFound(ref cause) => cause, + UntagResourceError::ServiceUnavailable(ref cause) => cause, + UntagResourceError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UpdateMesh +#[derive(Debug, PartialEq)] +pub enum UpdateMeshError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

The request contains a client token that was used for a previous update resource call + /// with different specifications. Try the request again with a new client token.

+ Conflict(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UpdateMeshError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UpdateMeshError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UpdateMeshError::Conflict(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UpdateMeshError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UpdateMeshError::InternalServerError(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UpdateMeshError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UpdateMeshError::ServiceUnavailable(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(UpdateMeshError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateMeshError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateMeshError { + fn description(&self) -> &str { + match *self { + UpdateMeshError::BadRequest(ref cause) => cause, + UpdateMeshError::Conflict(ref cause) => cause, + UpdateMeshError::Forbidden(ref cause) => cause, + UpdateMeshError::InternalServerError(ref cause) => cause, + UpdateMeshError::NotFound(ref cause) => cause, + UpdateMeshError::ServiceUnavailable(ref cause) => cause, + UpdateMeshError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UpdateRoute +#[derive(Debug, PartialEq)] +pub enum UpdateRouteError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

The request contains a client token that was used for a previous update resource call + /// with different specifications. Try the request again with a new client token.

+ Conflict(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

You have exceeded a service limit for your account. For more information, see Service + /// Limits in the AWS App Mesh User Guide.

+ LimitExceeded(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UpdateRouteError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UpdateRouteError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UpdateRouteError::Conflict(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UpdateRouteError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UpdateRouteError::InternalServerError(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(UpdateRouteError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UpdateRouteError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UpdateRouteError::ServiceUnavailable(err.msg)) + } + "TooManyRequestsException" => { + return RusotoError::Service(UpdateRouteError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateRouteError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateRouteError { + fn description(&self) -> &str { + match *self { + UpdateRouteError::BadRequest(ref cause) => cause, + UpdateRouteError::Conflict(ref cause) => cause, + UpdateRouteError::Forbidden(ref cause) => cause, + UpdateRouteError::InternalServerError(ref cause) => cause, + UpdateRouteError::LimitExceeded(ref cause) => cause, + UpdateRouteError::NotFound(ref cause) => cause, + UpdateRouteError::ServiceUnavailable(ref cause) => cause, + UpdateRouteError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UpdateVirtualNode +#[derive(Debug, PartialEq)] +pub enum UpdateVirtualNodeError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

The request contains a client token that was used for a previous update resource call + /// with different specifications. Try the request again with a new client token.

+ Conflict(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

You have exceeded a service limit for your account. For more information, see Service + /// Limits in the AWS App Mesh User Guide.

+ LimitExceeded(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UpdateVirtualNodeError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UpdateVirtualNodeError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UpdateVirtualNodeError::Conflict(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UpdateVirtualNodeError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UpdateVirtualNodeError::InternalServerError( + err.msg, + )) + } + "LimitExceededException" => { + return RusotoError::Service(UpdateVirtualNodeError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UpdateVirtualNodeError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UpdateVirtualNodeError::ServiceUnavailable( + err.msg, + )) + } + "TooManyRequestsException" => { + return RusotoError::Service(UpdateVirtualNodeError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateVirtualNodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateVirtualNodeError { + fn description(&self) -> &str { + match *self { + UpdateVirtualNodeError::BadRequest(ref cause) => cause, + UpdateVirtualNodeError::Conflict(ref cause) => cause, + UpdateVirtualNodeError::Forbidden(ref cause) => cause, + UpdateVirtualNodeError::InternalServerError(ref cause) => cause, + UpdateVirtualNodeError::LimitExceeded(ref cause) => cause, + UpdateVirtualNodeError::NotFound(ref cause) => cause, + UpdateVirtualNodeError::ServiceUnavailable(ref cause) => cause, + UpdateVirtualNodeError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UpdateVirtualRouter +#[derive(Debug, PartialEq)] +pub enum UpdateVirtualRouterError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

The request contains a client token that was used for a previous update resource call + /// with different specifications. Try the request again with a new client token.

+ Conflict(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

You have exceeded a service limit for your account. For more information, see Service + /// Limits in the AWS App Mesh User Guide.

+ LimitExceeded(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UpdateVirtualRouterError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UpdateVirtualRouterError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UpdateVirtualRouterError::Conflict(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UpdateVirtualRouterError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UpdateVirtualRouterError::InternalServerError( + err.msg, + )) + } + "LimitExceededException" => { + return RusotoError::Service(UpdateVirtualRouterError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UpdateVirtualRouterError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UpdateVirtualRouterError::ServiceUnavailable( + err.msg, + )) + } + "TooManyRequestsException" => { + return RusotoError::Service(UpdateVirtualRouterError::TooManyRequests(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateVirtualRouterError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateVirtualRouterError { + fn description(&self) -> &str { + match *self { + UpdateVirtualRouterError::BadRequest(ref cause) => cause, + UpdateVirtualRouterError::Conflict(ref cause) => cause, + UpdateVirtualRouterError::Forbidden(ref cause) => cause, + UpdateVirtualRouterError::InternalServerError(ref cause) => cause, + UpdateVirtualRouterError::LimitExceeded(ref cause) => cause, + UpdateVirtualRouterError::NotFound(ref cause) => cause, + UpdateVirtualRouterError::ServiceUnavailable(ref cause) => cause, + UpdateVirtualRouterError::TooManyRequests(ref cause) => cause, + } + } +} +/// Errors returned by UpdateVirtualService +#[derive(Debug, PartialEq)] +pub enum UpdateVirtualServiceError { + ///

The request syntax was malformed. Check your request syntax and try again.

+ BadRequest(String), + ///

The request contains a client token that was used for a previous update resource call + /// with different specifications. Try the request again with a new client token.

+ Conflict(String), + ///

You don't have permissions to perform this action.

+ Forbidden(String), + ///

The request processing has failed because of an unknown error, exception, or + /// failure.

+ InternalServerError(String), + ///

You have exceeded a service limit for your account. For more information, see Service + /// Limits in the AWS App Mesh User Guide.

+ LimitExceeded(String), + ///

The specified resource doesn't exist. Check your request syntax and try again.

+ NotFound(String), + ///

The request has failed due to a temporary failure of the service.

+ ServiceUnavailable(String), + ///

The maximum request rate permitted by the App Mesh APIs has been exceeded for your + /// account. For best results, use an increasing or variable sleep interval between + /// requests.

+ TooManyRequests(String), +} + +impl UpdateVirtualServiceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UpdateVirtualServiceError::BadRequest(err.msg)) + } + "ConflictException" => { + return RusotoError::Service(UpdateVirtualServiceError::Conflict(err.msg)) + } + "ForbiddenException" => { + return RusotoError::Service(UpdateVirtualServiceError::Forbidden(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UpdateVirtualServiceError::InternalServerError( + err.msg, + )) + } + "LimitExceededException" => { + return RusotoError::Service(UpdateVirtualServiceError::LimitExceeded(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UpdateVirtualServiceError::NotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(UpdateVirtualServiceError::ServiceUnavailable( + err.msg, + )) + } + "TooManyRequestsException" => { + return RusotoError::Service(UpdateVirtualServiceError::TooManyRequests( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateVirtualServiceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateVirtualServiceError { + fn description(&self) -> &str { + match *self { + UpdateVirtualServiceError::BadRequest(ref cause) => cause, + UpdateVirtualServiceError::Conflict(ref cause) => cause, + UpdateVirtualServiceError::Forbidden(ref cause) => cause, + UpdateVirtualServiceError::InternalServerError(ref cause) => cause, + UpdateVirtualServiceError::LimitExceeded(ref cause) => cause, + UpdateVirtualServiceError::NotFound(ref cause) => cause, + UpdateVirtualServiceError::ServiceUnavailable(ref cause) => cause, + UpdateVirtualServiceError::TooManyRequests(ref cause) => cause, + } + } +} +/// Trait representing the capabilities of the AWS App Mesh API. AWS App Mesh clients implement this trait. +pub trait AppMesh { + ///
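As a point of reference for consumers of these generated enums, here is a minimal sketch (not part of the generated module) of separating transient failures from permanent ones; the retry policy itself is illustrative.

```rust
use rusoto_core::RusotoError;

/// Illustrative only: returns true for the failures the doc comments above
/// describe as transient (throttling and temporary unavailability).
fn is_retryable(err: &RusotoError<ListVirtualNodesError>) -> bool {
    match err {
        // The service asks callers to use an increasing or variable sleep
        // interval between requests when the request rate is exceeded.
        RusotoError::Service(ListVirtualNodesError::TooManyRequests(_))
        | RusotoError::Service(ListVirtualNodesError::ServiceUnavailable(_)) => true,
        // Malformed requests, missing resources, and permission problems
        // will not succeed on a retry.
        _ => false,
    }
}
```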

+/// Trait representing the capabilities of the AWS App Mesh API. AWS App Mesh clients implement this trait.
+pub trait AppMesh {
+    /// <p>Creates a service mesh. A service mesh is a logical boundary for network traffic between
+    /// the services that reside within it.</p>
+    ///
+    /// <p>After you create your service mesh, you can create virtual services, virtual nodes,
+    /// virtual routers, and routes to distribute traffic between the applications in your
+    /// mesh.</p>
+    ///
+    fn create_mesh(
+        &self,
+        input: CreateMeshInput,
+    ) -> RusotoFuture<CreateMeshOutput, CreateMeshError>;
+
+    /// <p>Creates a route that is associated with a virtual router.</p>
+    ///
+    /// <p>You can use the <code>prefix</code> parameter in your route specification for path-based
+    /// routing of requests. For example, if your virtual service name is
+    /// <code>my-service.local</code> and you want the route to match requests to
+    /// <code>my-service.local/metrics</code>, your prefix should be
+    /// <code>/metrics</code>.</p>
+    /// <p>If your route matches a request, you can distribute traffic to one or more target
+    /// virtual nodes with relative weighting.</p>
+    ///
+    fn create_route(
+        &self,
+        input: CreateRouteInput,
+    ) -> RusotoFuture<CreateRouteOutput, CreateRouteError>;
+
+    /// <p>Creates a virtual node within a service mesh.</p>
+    ///
+    /// <p>A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS
+    /// service or a Kubernetes deployment. When you create a virtual node, you can specify the
+    /// service discovery information for your task group.</p>
+    /// <p>Any inbound traffic that your virtual node expects should be specified as a
+    /// <code>listener</code>. Any outbound traffic that your virtual node expects to reach
+    /// should be specified as a <code>backend</code>.</p>
+    /// <p>The response metadata for your new virtual node contains the <code>arn</code> that is
+    /// associated with the virtual node. Set this value (either the full ARN or the truncated
+    /// resource name: for example, <code>mesh/default/virtualNode/simpleapp</code>) as the
+    /// <code>APPMESH_VIRTUAL_NODE_NAME</code> environment variable for your task group's Envoy
+    /// proxy container in your task definition or pod spec. This is then mapped to the
+    /// <code>node.id</code> and <code>node.cluster</code> Envoy parameters.</p>
+    /// <note>
+    /// <p>If you require your Envoy stats or tracing to use a different name, you can override
+    /// the <code>node.cluster</code> value that is set by
+    /// <code>APPMESH_VIRTUAL_NODE_NAME</code> with the
+    /// <code>APPMESH_VIRTUAL_NODE_CLUSTER</code> environment variable.</p>
+    /// </note>
+    ///
+    fn create_virtual_node(
+        &self,
+        input: CreateVirtualNodeInput,
+    ) -> RusotoFuture<CreateVirtualNodeOutput, CreateVirtualNodeError>;
+
+    /// <p>Creates a virtual router within a service mesh.</p>
+    ///
+    /// <p>Any inbound traffic that your virtual router expects should be specified as a
+    /// <code>listener</code>. </p>
+    /// <p>Virtual routers handle traffic for one or more virtual services within your mesh. After
+    /// you create your virtual router, create and associate routes for your virtual router that
+    /// direct incoming requests to different virtual nodes.</p>
+    ///
+    fn create_virtual_router(
+        &self,
+        input: CreateVirtualRouterInput,
+    ) -> RusotoFuture<CreateVirtualRouterOutput, CreateVirtualRouterError>;
+
+    /// <p>Creates a virtual service within a service mesh.</p>
+    ///
+    /// <p>A virtual service is an abstraction of a real service that is provided by a virtual node
+    /// directly or indirectly by means of a virtual router. Dependent services call your virtual
+    /// service by its <code>virtualServiceName</code>, and those requests are routed to the
+    /// virtual node or virtual router that is specified as the provider for the virtual
+    /// service.</p>
+    ///
+    fn create_virtual_service(
+        &self,
+        input: CreateVirtualServiceInput,
+    ) -> RusotoFuture<CreateVirtualServiceOutput, CreateVirtualServiceError>;
+
+    /// <p>Deletes an existing service mesh.</p>
+    ///
+    /// <p>You must delete all resources (virtual services, routes, virtual routers, and virtual
+    /// nodes) in the service mesh before you can delete the mesh itself.</p>
+    ///
+    fn delete_mesh(
+        &self,
+        input: DeleteMeshInput,
+    ) -> RusotoFuture<DeleteMeshOutput, DeleteMeshError>;
+
+    /// <p>Deletes an existing route.</p>
+    fn delete_route(
+        &self,
+        input: DeleteRouteInput,
+    ) -> RusotoFuture<DeleteRouteOutput, DeleteRouteError>;
+
+    /// <p>Deletes an existing virtual node.</p>
+    ///
+    /// <p>You must delete any virtual services that list a virtual node as a service provider
+    /// before you can delete the virtual node itself.</p>
+    ///
+    fn delete_virtual_node(
+        &self,
+        input: DeleteVirtualNodeInput,
+    ) -> RusotoFuture<DeleteVirtualNodeOutput, DeleteVirtualNodeError>;
+
+    /// <p>Deletes an existing virtual router.</p>
+    ///
+    /// <p>You must delete any routes associated with the virtual router before you can delete the
+    /// router itself.</p>
+    ///
+    fn delete_virtual_router(
+        &self,
+        input: DeleteVirtualRouterInput,
+    ) -> RusotoFuture<DeleteVirtualRouterOutput, DeleteVirtualRouterError>;
+
+    /// <p>Deletes an existing virtual service.</p>
+    fn delete_virtual_service(
+        &self,
+        input: DeleteVirtualServiceInput,
+    ) -> RusotoFuture<DeleteVirtualServiceOutput, DeleteVirtualServiceError>;
+
+    /// <p>Describes an existing service mesh.</p>
+    fn describe_mesh(
+        &self,
+        input: DescribeMeshInput,
+    ) -> RusotoFuture<DescribeMeshOutput, DescribeMeshError>;
+
+    /// <p>Describes an existing route.</p>
+    fn describe_route(
+        &self,
+        input: DescribeRouteInput,
+    ) -> RusotoFuture<DescribeRouteOutput, DescribeRouteError>;
+
+    /// <p>Describes an existing virtual node.</p>
+    fn describe_virtual_node(
+        &self,
+        input: DescribeVirtualNodeInput,
+    ) -> RusotoFuture<DescribeVirtualNodeOutput, DescribeVirtualNodeError>;
+
+    /// <p>Describes an existing virtual router.</p>
+    fn describe_virtual_router(
+        &self,
+        input: DescribeVirtualRouterInput,
+    ) -> RusotoFuture<DescribeVirtualRouterOutput, DescribeVirtualRouterError>;
+
+    /// <p>Describes an existing virtual service.</p>
+    fn describe_virtual_service(
+        &self,
+        input: DescribeVirtualServiceInput,
+    ) -> RusotoFuture<DescribeVirtualServiceOutput, DescribeVirtualServiceError>;
+
+    /// <p>Returns a list of existing service meshes.</p>
+    fn list_meshes(
+        &self,
+        input: ListMeshesInput,
+    ) -> RusotoFuture<ListMeshesOutput, ListMeshesError>;
+
+    /// <p>Returns a list of existing routes in a service mesh.</p>
+    fn list_routes(
+        &self,
+        input: ListRoutesInput,
+    ) -> RusotoFuture<ListRoutesOutput, ListRoutesError>;
+
+    /// <p>List the tags for an App Mesh resource.</p>
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceInput,
+    ) -> RusotoFuture<ListTagsForResourceOutput, ListTagsForResourceError>;
+
+    /// <p>Returns a list of existing virtual nodes.</p>
+    fn list_virtual_nodes(
+        &self,
+        input: ListVirtualNodesInput,
+    ) -> RusotoFuture<ListVirtualNodesOutput, ListVirtualNodesError>;
+
+    /// <p>Returns a list of existing virtual routers in a service mesh.</p>
+    fn list_virtual_routers(
+        &self,
+        input: ListVirtualRoutersInput,
+    ) -> RusotoFuture<ListVirtualRoutersOutput, ListVirtualRoutersError>;
+
+    /// <p>Returns a list of existing virtual services in a service mesh.</p>
+    fn list_virtual_services(
+        &self,
+        input: ListVirtualServicesInput,
+    ) -> RusotoFuture<ListVirtualServicesOutput, ListVirtualServicesError>;
+
+    /// <p>Associates the specified tags to a resource with the specified resourceArn.
+    /// If existing tags on a resource aren't specified in the request parameters, they aren't
+    /// changed. When a resource is deleted, the tags associated with that resource are also
+    /// deleted.</p>
+    fn tag_resource(
+        &self,
+        input: TagResourceInput,
+    ) -> RusotoFuture<TagResourceOutput, TagResourceError>;
+
+    /// <p>Deletes specified tags from a resource.</p>
+    fn untag_resource(
+        &self,
+        input: UntagResourceInput,
+    ) -> RusotoFuture<UntagResourceOutput, UntagResourceError>;
+
+    /// <p>Updates an existing service mesh.</p>
+    fn update_mesh(
+        &self,
+        input: UpdateMeshInput,
+    ) -> RusotoFuture<UpdateMeshOutput, UpdateMeshError>;
+
+    /// <p>Updates an existing route for a specified service mesh and virtual router.</p>
+    fn update_route(
+        &self,
+        input: UpdateRouteInput,
+    ) -> RusotoFuture<UpdateRouteOutput, UpdateRouteError>;
+
+    /// <p>Updates an existing virtual node in a specified service mesh.</p>
+    fn update_virtual_node(
+        &self,
+        input: UpdateVirtualNodeInput,
+    ) -> RusotoFuture<UpdateVirtualNodeOutput, UpdateVirtualNodeError>;
+
+    /// <p>Updates an existing virtual router in a specified service mesh.</p>
+    fn update_virtual_router(
+        &self,
+        input: UpdateVirtualRouterInput,
+    ) -> RusotoFuture<UpdateVirtualRouterOutput, UpdateVirtualRouterError>;
+
+    /// <p>Updates an existing virtual service in a specified service mesh.</p>
+    fn update_virtual_service(
+        &self,
+        input: UpdateVirtualServiceInput,
+    ) -> RusotoFuture<UpdateVirtualServiceOutput, UpdateVirtualServiceError>;
+}
+/// A client for the AWS App Mesh API.
+#[derive(Clone)]
+pub struct AppMeshClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl AppMeshClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> AppMeshClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> AppMeshClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
+            region,
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AppMeshClient {
+        AppMeshClient { client, region }
+    }
+}
+
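To make the constructors above concrete, here is a minimal usage sketch. It assumes the usual companion crates (`rusoto_core` for `Region` and `HttpClient`, `rusoto_credential` for `ProfileProvider`); the region and profile choices are illustrative.

```rust
use rusoto_core::{HttpClient, Region};
use rusoto_credential::ProfileProvider;

fn build_clients() {
    // Default dispatcher plus the default credentials chain:
    let _default_client = AppMeshClient::new(Region::UsEast1);

    // Explicit dispatcher and credentials, via `new_with`:
    let dispatcher = HttpClient::new().expect("failed to create request dispatcher");
    let credentials = ProfileProvider::new().expect("failed to load AWS credentials profile");
    let _custom_client = AppMeshClient::new_with(dispatcher, credentials, Region::UsWest2);
}
```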

+impl AppMesh for AppMeshClient {
+    /// <p>Creates a service mesh. A service mesh is a logical boundary for network traffic between
+    /// the services that reside within it.</p>
+    ///
+    /// <p>After you create your service mesh, you can create virtual services, virtual nodes,
+    /// virtual routers, and routes to distribute traffic between the applications in your
+    /// mesh.</p>
+    ///
+    fn create_mesh(
+        &self,
+        input: CreateMeshInput,
+    ) -> RusotoFuture<CreateMeshOutput, CreateMeshError> {
+        let request_uri = "/v20190125/meshes";
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateMeshOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateMeshError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Creates a route that is associated with a virtual router.</p>
+    ///
+    /// <p>You can use the <code>prefix</code> parameter in your route specification for path-based
+    /// routing of requests. For example, if your virtual service name is
+    /// <code>my-service.local</code> and you want the route to match requests to
+    /// <code>my-service.local/metrics</code>, your prefix should be
+    /// <code>/metrics</code>.</p>
+    /// <p>If your route matches a request, you can distribute traffic to one or more target
+    /// virtual nodes with relative weighting.</p>
+    ///
+    fn create_route(
+        &self,
+        input: CreateRouteInput,
+    ) -> RusotoFuture<CreateRouteOutput, CreateRouteError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouter/{virtual_router_name}/routes",
+            mesh_name = input.mesh_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateRouteOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateRouteError::from_response(response))),
+                )
+            }
+        })
+    }
+
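The path-based routing described in the doc comment above translates into a request along these lines. This is a sketch only: the `RouteSpec`/`HttpRoute` field names follow the general 2019-01-25 App Mesh API shape and are assumptions, not definitions taken from this diff.

```rust
fn route_metrics_traffic(client: &AppMeshClient) {
    // Assumed input shape: an HTTP route matching the `/metrics` prefix and
    // weighting all matching traffic to a single virtual node.
    let input = CreateRouteInput {
        mesh_name: "default".to_owned(),
        virtual_router_name: "my-router".to_owned(),
        route_name: "metrics-route".to_owned(),
        spec: RouteSpec {
            http_route: Some(HttpRoute {
                match_: HttpRouteMatch {
                    prefix: "/metrics".to_owned(),
                },
                action: HttpRouteAction {
                    weighted_targets: vec![WeightedTarget {
                        virtual_node: "metrics-node".to_owned(),
                        weight: 1,
                    }],
                },
            }),
            ..Default::default()
        },
        ..Default::default()
    };
    // Issues PUT /v20190125/meshes/default/virtualRouter/my-router/routes.
    let _pending = client.create_route(input);
}
```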

+    /// <p>Creates a virtual node within a service mesh.</p>
+    ///
+    /// <p>A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS
+    /// service or a Kubernetes deployment. When you create a virtual node, you can specify the
+    /// service discovery information for your task group.</p>
+    /// <p>Any inbound traffic that your virtual node expects should be specified as a
+    /// <code>listener</code>. Any outbound traffic that your virtual node expects to reach
+    /// should be specified as a <code>backend</code>.</p>
+    /// <p>The response metadata for your new virtual node contains the <code>arn</code> that is
+    /// associated with the virtual node. Set this value (either the full ARN or the truncated
+    /// resource name: for example, <code>mesh/default/virtualNode/simpleapp</code>) as the
+    /// <code>APPMESH_VIRTUAL_NODE_NAME</code> environment variable for your task group's Envoy
+    /// proxy container in your task definition or pod spec. This is then mapped to the
+    /// <code>node.id</code> and <code>node.cluster</code> Envoy parameters.</p>
+    /// <note>
+    /// <p>If you require your Envoy stats or tracing to use a different name, you can override
+    /// the <code>node.cluster</code> value that is set by
+    /// <code>APPMESH_VIRTUAL_NODE_NAME</code> with the
+    /// <code>APPMESH_VIRTUAL_NODE_CLUSTER</code> environment variable.</p>
+    /// </note>
+    ///
+    fn create_virtual_node(
+        &self,
+        input: CreateVirtualNodeInput,
+    ) -> RusotoFuture<CreateVirtualNodeOutput, CreateVirtualNodeError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualNodes",
+            mesh_name = input.mesh_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateVirtualNodeOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateVirtualNodeError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Creates a virtual router within a service mesh.</p>
+    ///
+    /// <p>Any inbound traffic that your virtual router expects should be specified as a
+    /// <code>listener</code>. </p>
+    /// <p>Virtual routers handle traffic for one or more virtual services within your mesh. After
+    /// you create your virtual router, create and associate routes for your virtual router that
+    /// direct incoming requests to different virtual nodes.</p>
+    ///
+    fn create_virtual_router(
+        &self,
+        input: CreateVirtualRouterInput,
+    ) -> RusotoFuture<CreateVirtualRouterOutput, CreateVirtualRouterError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouters",
+            mesh_name = input.mesh_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateVirtualRouterOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(CreateVirtualRouterError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Creates a virtual service within a service mesh.</p>
+    ///
+    /// <p>A virtual service is an abstraction of a real service that is provided by a virtual node
+    /// directly or indirectly by means of a virtual router. Dependent services call your virtual
+    /// service by its <code>virtualServiceName</code>, and those requests are routed to the
+    /// virtual node or virtual router that is specified as the provider for the virtual
+    /// service.</p>
+    ///
+    fn create_virtual_service(
+        &self,
+        input: CreateVirtualServiceInput,
+    ) -> RusotoFuture<CreateVirtualServiceOutput, CreateVirtualServiceError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualServices",
+            mesh_name = input.mesh_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateVirtualServiceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(CreateVirtualServiceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes an existing service mesh.</p>
+    ///
+    /// <p>You must delete all resources (virtual services, routes, virtual routers, and virtual
+    /// nodes) in the service mesh before you can delete the mesh itself.</p>
+    ///
+    fn delete_mesh(
+        &self,
+        input: DeleteMeshInput,
+    ) -> RusotoFuture<DeleteMeshOutput, DeleteMeshError> {
+        let request_uri = format!("/v20190125/meshes/{mesh_name}", mesh_name = input.mesh_name);
+
+        let mut request = SignedRequest::new("DELETE", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteMeshOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteMeshError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes an existing route.</p>
+    fn delete_route(
+        &self,
+        input: DeleteRouteInput,
+    ) -> RusotoFuture<DeleteRouteOutput, DeleteRouteError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouter/{virtual_router_name}/routes/{route_name}",
+            mesh_name = input.mesh_name,
+            route_name = input.route_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("DELETE", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteRouteOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteRouteError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes an existing virtual node.</p>
+    ///
+    /// <p>You must delete any virtual services that list a virtual node as a service provider
+    /// before you can delete the virtual node itself.</p>
+    ///
+    fn delete_virtual_node(
+        &self,
+        input: DeleteVirtualNodeInput,
+    ) -> RusotoFuture<DeleteVirtualNodeOutput, DeleteVirtualNodeError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualNodes/{virtual_node_name}",
+            mesh_name = input.mesh_name,
+            virtual_node_name = input.virtual_node_name
+        );
+
+        let mut request = SignedRequest::new("DELETE", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteVirtualNodeOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteVirtualNodeError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes an existing virtual router.</p>
+    ///
+    /// <p>You must delete any routes associated with the virtual router before you can delete the
+    /// router itself.</p>
+    ///
+    fn delete_virtual_router(
+        &self,
+        input: DeleteVirtualRouterInput,
+    ) -> RusotoFuture<DeleteVirtualRouterOutput, DeleteVirtualRouterError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouters/{virtual_router_name}",
+            mesh_name = input.mesh_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("DELETE", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteVirtualRouterOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DeleteVirtualRouterError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes an existing virtual service.</p>
+    fn delete_virtual_service(
+        &self,
+        input: DeleteVirtualServiceInput,
+    ) -> RusotoFuture<DeleteVirtualServiceOutput, DeleteVirtualServiceError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualServices/{virtual_service_name}",
+            mesh_name = input.mesh_name,
+            virtual_service_name = input.virtual_service_name
+        );
+
+        let mut request = SignedRequest::new("DELETE", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteVirtualServiceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DeleteVirtualServiceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Describes an existing service mesh.</p>
+    fn describe_mesh(
+        &self,
+        input: DescribeMeshInput,
+    ) -> RusotoFuture<DescribeMeshOutput, DescribeMeshError> {
+        let request_uri = format!("/v20190125/meshes/{mesh_name}", mesh_name = input.mesh_name);
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeMeshOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeMeshError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Describes an existing route.</p>
+    fn describe_route(
+        &self,
+        input: DescribeRouteInput,
+    ) -> RusotoFuture<DescribeRouteOutput, DescribeRouteError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouter/{virtual_router_name}/routes/{route_name}",
+            mesh_name = input.mesh_name,
+            route_name = input.route_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeRouteOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeRouteError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Describes an existing virtual node.</p>
+    fn describe_virtual_node(
+        &self,
+        input: DescribeVirtualNodeInput,
+    ) -> RusotoFuture<DescribeVirtualNodeOutput, DescribeVirtualNodeError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualNodes/{virtual_node_name}",
+            mesh_name = input.mesh_name,
+            virtual_node_name = input.virtual_node_name
+        );
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeVirtualNodeOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DescribeVirtualNodeError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Describes an existing virtual router.</p>
+    fn describe_virtual_router(
+        &self,
+        input: DescribeVirtualRouterInput,
+    ) -> RusotoFuture<DescribeVirtualRouterOutput, DescribeVirtualRouterError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouters/{virtual_router_name}",
+            mesh_name = input.mesh_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeVirtualRouterOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DescribeVirtualRouterError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Describes an existing virtual service.</p>
+    fn describe_virtual_service(
+        &self,
+        input: DescribeVirtualServiceInput,
+    ) -> RusotoFuture<DescribeVirtualServiceOutput, DescribeVirtualServiceError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualServices/{virtual_service_name}",
+            mesh_name = input.mesh_name,
+            virtual_service_name = input.virtual_service_name
+        );
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeVirtualServiceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DescribeVirtualServiceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns a list of existing service meshes.</p>
+    fn list_meshes(
+        &self,
+        input: ListMeshesInput,
+    ) -> RusotoFuture<ListMeshesOutput, ListMeshesError> {
+        let request_uri = "/v20190125/meshes";
+
+        let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.limit {
+            params.put("limit", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("nextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListMeshesOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListMeshesError::from_response(response))),
+                )
+            }
+        })
+    }
+
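The `limit`/`nextToken` handling above implies the usual pagination loop. A blocking sketch, assuming `ListMeshesOutput` exposes `meshes: Vec<MeshRef>` plus an optional `next_token` (consistent with the parameters set above); `.sync()` is used only to keep the example short.

```rust
use rusoto_core::RusotoError;

fn all_meshes(client: &AppMeshClient) -> Result<Vec<MeshRef>, RusotoError<ListMeshesError>> {
    let mut meshes = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        // Each page carries at most `limit` results plus a continuation token.
        let page = client
            .list_meshes(ListMeshesInput {
                limit: Some(50),
                next_token: next_token.take(),
            })
            .sync()?;
        meshes.extend(page.meshes);
        match page.next_token {
            Some(token) => next_token = Some(token),
            None => return Ok(meshes),
        }
    }
}
```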

Returns a list of existing routes in a service mesh.

+ fn list_routes( + &self, + input: ListRoutesInput, + ) -> RusotoFuture { + let request_uri = format!( + "/v20190125/meshes/{mesh_name}/virtualRouter/{virtual_router_name}/routes", + mesh_name = input.mesh_name, + virtual_router_name = input.virtual_router_name + ); + + let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + if let Some(ref x) = input.limit { + params.put("limit", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(ListRoutesError::from_response(response))), + ) + } + }) + } + + ///

List the tags for an App Mesh resource.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceInput, + ) -> RusotoFuture { + let request_uri = "/v20190125/tags"; + + let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + if let Some(ref x) = input.limit { + params.put("limit", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + params.put("resourceArn", &input.resource_arn); + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListTagsForResourceError::from_response(response)) + }), + ) + } + }) + } + + ///

Returns a list of existing virtual nodes.

+ fn list_virtual_nodes( + &self, + input: ListVirtualNodesInput, + ) -> RusotoFuture { + let request_uri = format!( + "/v20190125/meshes/{mesh_name}/virtualNodes", + mesh_name = input.mesh_name + ); + + let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + if let Some(ref x) = input.limit { + params.put("limit", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(ListVirtualNodesError::from_response(response))), + ) + } + }) + } + + ///

Returns a list of existing virtual routers in a service mesh.

+ fn list_virtual_routers( + &self, + input: ListVirtualRoutersInput, + ) -> RusotoFuture { + let request_uri = format!( + "/v20190125/meshes/{mesh_name}/virtualRouters", + mesh_name = input.mesh_name + ); + + let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + if let Some(ref x) = input.limit { + params.put("limit", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(ListVirtualRoutersError::from_response(response))), + ) + } + }) + } + + ///

Returns a list of existing virtual services in a service mesh.

+ fn list_virtual_services( + &self, + input: ListVirtualServicesInput, + ) -> RusotoFuture { + let request_uri = format!( + "/v20190125/meshes/{mesh_name}/virtualServices", + mesh_name = input.mesh_name + ); + + let mut request = SignedRequest::new("GET", "appmesh", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + if let Some(ref x) = input.limit { + params.put("limit", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListVirtualServicesError::from_response(response)) + }), + ) + } + }) + } + + ///
+    /// <p>Associates the specified tags to a resource with the specified resourceArn.
+    /// If existing tags on a resource aren't specified in the request parameters, they aren't
+    /// changed. When a resource is deleted, the tags associated with that resource are also
+    /// deleted.</p>
+    fn tag_resource(
+        &self,
+        input: TagResourceInput,
+    ) -> RusotoFuture<TagResourceOutput, TagResourceError> {
+        let request_uri = "/v20190125/tag";
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        let mut params = Params::new();
+        params.put("resourceArn", &input.resource_arn);
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TagResourceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes specified tags from a resource.</p>
+    fn untag_resource(
+        &self,
+        input: UntagResourceInput,
+    ) -> RusotoFuture<UntagResourceOutput, UntagResourceError> {
+        let request_uri = "/v20190125/untag";
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        let mut params = Params::new();
+        params.put("resourceArn", &input.resource_arn);
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UntagResourceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
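`tag_resource` PUTs a JSON body and carries the target ARN as a query parameter, while `untag_resource` removes tags by key. A hedged sketch of both calls, assuming a `TagRef { key, value }` shape for the tag type (that struct is not shown in this hunk):

```rust
use rusoto_appmesh::{AppMesh, AppMeshClient, TagRef, TagResourceInput, UntagResourceInput};
use rusoto_core::Region;

fn tag_and_untag(mesh_arn: &str) {
    let client = AppMeshClient::new(Region::UsEast1);

    // Attach one tag; per the doc comment above, tags already on the
    // resource but absent from this request are left unchanged.
    let tag = TagRef {
        key: "Environment".to_owned(),
        value: Some("Test".to_owned()),
    };
    client
        .tag_resource(TagResourceInput {
            resource_arn: mesh_arn.to_owned(),
            tags: vec![tag],
        })
        .sync()
        .expect("TagResource failed");

    // Remove it again by key.
    client
        .untag_resource(UntagResourceInput {
            resource_arn: mesh_arn.to_owned(),
            tag_keys: vec!["Environment".to_owned()],
        })
        .sync()
        .expect("UntagResource failed");
}
```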
+    /// <p>Updates an existing service mesh.</p>
+    fn update_mesh(
+        &self,
+        input: UpdateMeshInput,
+    ) -> RusotoFuture<UpdateMeshOutput, UpdateMeshError> {
+        let request_uri = format!("/v20190125/meshes/{mesh_name}", mesh_name = input.mesh_name);
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateMeshOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateMeshError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates an existing route for a specified service mesh and virtual router.</p>
+    fn update_route(
+        &self,
+        input: UpdateRouteInput,
+    ) -> RusotoFuture<UpdateRouteOutput, UpdateRouteError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouter/{virtual_router_name}/routes/{route_name}",
+            mesh_name = input.mesh_name,
+            route_name = input.route_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateRouteOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateRouteError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates an existing virtual node in a specified service mesh.</p>
+    fn update_virtual_node(
+        &self,
+        input: UpdateVirtualNodeInput,
+    ) -> RusotoFuture<UpdateVirtualNodeOutput, UpdateVirtualNodeError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualNodes/{virtual_node_name}",
+            mesh_name = input.mesh_name,
+            virtual_node_name = input.virtual_node_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateVirtualNodeOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateVirtualNodeError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates an existing virtual router in a specified service mesh.</p>
+    fn update_virtual_router(
+        &self,
+        input: UpdateVirtualRouterInput,
+    ) -> RusotoFuture<UpdateVirtualRouterOutput, UpdateVirtualRouterError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualRouters/{virtual_router_name}",
+            mesh_name = input.mesh_name,
+            virtual_router_name = input.virtual_router_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateVirtualRouterOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(UpdateVirtualRouterError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates an existing virtual service in a specified service mesh.</p>
+    fn update_virtual_service(
+        &self,
+        input: UpdateVirtualServiceInput,
+    ) -> RusotoFuture<UpdateVirtualServiceOutput, UpdateVirtualServiceError> {
+        let request_uri = format!(
+            "/v20190125/meshes/{mesh_name}/virtualServices/{virtual_service_name}",
+            mesh_name = input.mesh_name,
+            virtual_service_name = input.virtual_service_name
+        );
+
+        let mut request = SignedRequest::new("PUT", "appmesh", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateVirtualServiceOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(UpdateVirtualServiceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+}
diff --git a/rusoto/services/appmesh/src/lib.rs b/rusoto/services/appmesh/src/lib.rs
new file mode 100644
index 00000000000..66226085a7e
--- /dev/null
+++ b/rusoto/services/appmesh/src/lib.rs
@@ -0,0 +1,46 @@
+
+// =================================================================
+//
+//                            * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//!
+//! <p>AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and
+//! control microservices. App Mesh standardizes how your microservices communicate, giving you
+//! end-to-end visibility and helping to ensure high availability for your applications.</p>
+//!
+//! <p>App Mesh gives you consistent visibility and network traffic controls for every
+//! microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS,
+//! Kubernetes on AWS, and Amazon EC2.</p>
+//! <note>
+//! <p>App Mesh supports microservice applications that use service discovery naming for their
+//! components. For more information about service discovery on Amazon ECS, see <a href="http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html">Service Discovery</a> in the
+//! <i>Amazon Elastic Container Service Developer Guide</i>. Kubernetes <code>kube-dns</code> and
+//! <code>coredns</code> are supported. For more information, see <a href="https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/">DNS
+//! for Services and Pods</a> in the Kubernetes documentation.</p>
+//! </note>
+//! 
+//!
+//! If you're using the service, you're probably looking for [AppMeshClient](struct.AppMeshClient.html) and [AppMesh](trait.AppMesh.html).
+
+extern crate bytes;
+extern crate futures;
+extern crate rusoto_core;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+
+mod generated;
+mod custom;
+
+pub use crate::generated::*;
+pub use crate::custom::*;
+
diff --git a/rusoto/services/appstream/Cargo.toml b/rusoto/services/appstream/Cargo.toml
index 30190302f05..923c6e915c6 100644
--- a/rusoto/services/appstream/Cargo.toml
+++ b/rusoto/services/appstream/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_appstream"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/appstream/README.md b/rusoto/services/appstream/README.md
index 0aa9ee9e0f4..6828fdbb15c 100644
--- a/rusoto/services/appstream/README.md
+++ b/rusoto/services/appstream/README.md
@@ -23,9 +23,16 @@ To use `rusoto_appstream` in your application, add it as a dependency in your `C
 
 ```toml
 [dependencies]
-rusoto_appstream = "0.40.0"
+rusoto_appstream = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/appstream/src/custom/mod.rs b/rusoto/services/appstream/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/appstream/src/custom/mod.rs
+++ b/rusoto/services/appstream/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/appstream/src/generated.rs b/rusoto/services/appstream/src/generated.rs
index fa20a76cbc8..179eab37cf3 100644
--- a/rusoto/services/appstream/src/generated.rs
+++ b/rusoto/services/appstream/src/generated.rs
@@ -9,24 +9,35 @@
 //  must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
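The new `lib.rs` above points users at `AppMeshClient`/`AppMesh`. Before moving into the generated AppStream 2.0 types below, here is a minimal, hedged sketch of driving one of these generated clients; `list_meshes` and its output fields are assumptions based on the same codegen pattern, not part of this hunk:

```rust
use rusoto_appmesh::{AppMesh, AppMeshClient, ListMeshesInput};
use rusoto_core::Region;

fn main() {
    // `new` uses the default credential chain (environment, profile,
    // container, then instance metadata) and the crate's default TLS
    // feature.
    let client = AppMeshClient::new(Region::UsWest2);

    // Request structs derive Default, so optional paging fields can be
    // left unset.
    let meshes = client
        .list_meshes(ListMeshesInput::default())
        .sync()
        .expect("ListMeshes failed");
    for mesh in meshes.meshes {
        println!("{}", mesh.mesh_name);
    }
}
```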
+/// <p>Describes an interface VPC endpoint (interface endpoint) that lets you create a private connection between the virtual private cloud (VPC) that you specify and AppStream 2.0. When you specify an interface endpoint for a stack, users of the stack can connect to AppStream 2.0 only through that endpoint. When you specify an interface endpoint for an image builder, administrators can connect to the image builder only through that endpoint.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AccessEndpoint {
+    /// <p>The type of interface endpoint.</p>
+    #[serde(rename = "EndpointType")]
+    pub endpoint_type: String,
+    /// <p>The identifier (ID) of the VPC in which the interface endpoint is used.</p>
+    #[serde(rename = "VpceId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub vpce_id: Option<String>,
+}
+
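Since `AccessEndpoint` derives both `Serialize` and `Deserialize`, it can be constructed directly for requests. A hedged sketch of locking a stack down to a single interface endpoint, assuming the `create_stack` operation from the same crate; the endpoint type string and VPCE ID are hypothetical values:

```rust
use rusoto_appstream::{AccessEndpoint, AppStream, AppStreamClient, CreateStackRequest};
use rusoto_core::Region;

fn create_locked_down_stack() {
    let client = AppStreamClient::new(Region::UsEast1);

    // Restrict streaming access to one interface endpoint.
    let endpoint = AccessEndpoint {
        endpoint_type: "STREAMING".to_owned(),          // hypothetical type
        vpce_id: Some("vpce-0123456789abcdef0".to_owned()), // hypothetical ID
    };

    let request = CreateStackRequest {
        name: "example-stack".to_owned(),
        access_endpoints: Some(vec![endpoint]),
        // Remaining optional fields are back-filled via Default.
        ..Default::default()
    };
    client.create_stack(request).sync().expect("CreateStack failed");
}
```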
 /// <p>Describes an application in the application catalog.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Application {
     /// <p>The application name to display.</p>
#[serde(rename = "DisplayName")] @@ -72,7 +83,7 @@ pub struct ApplicationSettings { ///

Describes the persistent application settings for users of a stack.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplicationSettingsResponse { ///

Specifies whether persistent application settings are enabled for users during their streaming sessions.

#[serde(rename = "Enabled")] @@ -99,7 +110,7 @@ pub struct AssociateFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateFleetResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -110,7 +121,7 @@ pub struct BatchAssociateUserStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAssociateUserStackResult { ///

The list of UserStackAssociationError objects.

#[serde(rename = "errors")] @@ -126,7 +137,7 @@ pub struct BatchDisassociateUserStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDisassociateUserStackResult { ///

The list of UserStackAssociationError objects.

#[serde(rename = "errors")] @@ -144,7 +155,7 @@ pub struct ComputeCapacity { ///

Describes the capacity status for a fleet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComputeCapacityStatus { ///

The number of currently available instances that can be used to stream sessions.

#[serde(rename = "Available")] @@ -181,7 +192,7 @@ pub struct CopyImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CopyImageResponse { ///

The name of the destination image.

#[serde(rename = "DestinationImageName")] @@ -203,7 +214,7 @@ pub struct CreateDirectoryConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectoryConfigResult { ///

Information about the directory configuration.

#[serde(rename = "DirectoryConfig")] @@ -240,7 +251,11 @@ pub struct CreateFleetRequest { #[serde(rename = "FleetType")] #[serde(skip_serializing_if = "Option::is_none")] pub fleet_type: Option, - ///

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 900.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

+ ///

The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

+ #[serde(rename = "IamRoleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub iam_role_arn: Option, + ///

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

#[serde(rename = "IdleDisconnectTimeoutInSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub idle_disconnect_timeout_in_seconds: Option, @@ -262,7 +277,7 @@ pub struct CreateFleetRequest { ///
@@ -262,7 +277,7 @@ pub struct CreateFleetRequest {
     /// <p>A unique name for the fleet.</p>
#[serde(rename = "Name")] pub name: String, - ///

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, @@ -273,7 +288,7 @@ pub struct CreateFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFleetResult { ///

Information about the fleet.

#[serde(rename = "Fleet")] @@ -283,6 +298,10 @@ pub struct CreateFleetResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateImageBuilderRequest { + ///

The list of interface VPC endpoint (interface endpoint) objects. Administrators can connect to the image builder only through the specified endpoints.

+ #[serde(rename = "AccessEndpoints")] + #[serde(skip_serializing_if = "Option::is_none")] + pub access_endpoints: Option>, ///

The version of the AppStream 2.0 agent to use for this image builder. To use the latest version of the AppStream 2.0 agent, specify [LATEST].

#[serde(rename = "AppstreamAgentVersion")] #[serde(skip_serializing_if = "Option::is_none")] @@ -303,6 +322,10 @@ pub struct CreateImageBuilderRequest { #[serde(rename = "EnableDefaultInternetAccess")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_default_internet_access: Option, + ///

The Amazon Resource Name (ARN) of the IAM role to apply to the image builder. To assume a role, the image builder calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

+ #[serde(rename = "IamRoleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub iam_role_arn: Option, ///

The ARN of the public, private, or shared image to use.

#[serde(rename = "ImageArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -317,7 +340,7 @@ pub struct CreateImageBuilderRequest { ///

A unique name for the image builder.

#[serde(rename = "Name")] pub name: String, - ///

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, @@ -328,7 +351,7 @@ pub struct CreateImageBuilderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateImageBuilderResult { ///

Information about the image builder.

#[serde(rename = "ImageBuilder")] @@ -348,7 +371,7 @@ pub struct CreateImageBuilderStreamingURLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateImageBuilderStreamingURLResult { ///

The elapsed time, in seconds after the Unix epoch, when this URL expires.

#[serde(rename = "Expires")] @@ -362,6 +385,10 @@ pub struct CreateImageBuilderStreamingURLResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateStackRequest { + ///

The list of interface VPC endpoint (interface endpoint) objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.

+ #[serde(rename = "AccessEndpoints")] + #[serde(skip_serializing_if = "Option::is_none")] + pub access_endpoints: Option>, ///

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

#[serde(rename = "ApplicationSettings")] #[serde(skip_serializing_if = "Option::is_none")] @@ -389,7 +416,7 @@ pub struct CreateStackRequest { #[serde(rename = "StorageConnectors")] #[serde(skip_serializing_if = "Option::is_none")] pub storage_connectors: Option>, - ///

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \ - @

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, @@ -400,7 +427,7 @@ pub struct CreateStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStackResult { ///

Information about the stack.

#[serde(rename = "Stack")] @@ -417,7 +444,7 @@ pub struct CreateStreamingURLRequest { ///

The name of the fleet.

#[serde(rename = "FleetName")] pub fleet_name: String, - ///

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Developer Guide.

+ ///

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Administration Guide.

#[serde(rename = "SessionContext")] #[serde(skip_serializing_if = "Option::is_none")] pub session_context: Option, @@ -434,7 +461,7 @@ pub struct CreateStreamingURLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStreamingURLResult { ///

The elapsed time, in seconds after the Unix epoch, when this URL expires.

#[serde(rename = "Expires")] @@ -450,9 +477,9 @@ pub struct CreateStreamingURLResult { pub struct CreateUsageReportSubscriptionRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUsageReportSubscriptionResult { - ///

The Amazon S3 bucket where generated reports are stored. When a usage report subscription is enabled for the first time for an account in an AWS Region, an S3 bucket is created. The bucket is unique to the AWS account and the Region.

+ ///

The Amazon S3 bucket where generated reports are stored.

If you enabled on-instance session scripts and Amazon S3 logging for your session script configuration, AppStream 2.0 created an S3 bucket to store the script output. The bucket is unique to your account and Region. When you enable usage reporting in this case, AppStream 2.0 uses the same bucket to store your usage reports. If you haven't already enabled on-instance session scripts, when you enable usage reports, AppStream 2.0 creates a new S3 bucket.

#[serde(rename = "S3BucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub s3_bucket_name: Option, @@ -485,7 +512,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -496,7 +523,7 @@ pub struct DeleteDirectoryConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectoryConfigResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -507,7 +534,7 @@ pub struct DeleteFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFleetResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -518,7 +545,7 @@ pub struct DeleteImageBuilderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteImageBuilderResult { ///

Information about the image builder.

#[serde(rename = "ImageBuilder")] @@ -537,7 +564,7 @@ pub struct DeleteImagePermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteImagePermissionsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -548,7 +575,7 @@ pub struct DeleteImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteImageResult { ///

Information about the image.

#[serde(rename = "Image")] @@ -564,14 +591,14 @@ pub struct DeleteStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteStackResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteUsageReportSubscriptionRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUsageReportSubscriptionResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -585,7 +612,7 @@ pub struct DeleteUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -605,7 +632,7 @@ pub struct DescribeDirectoryConfigsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectoryConfigsResult { ///

Information about the directory configurations. Note that although the response syntax in this topic includes the account password, this password is not returned in the actual response.

#[serde(rename = "DirectoryConfigs")] @@ -630,7 +657,7 @@ pub struct DescribeFleetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFleetsResult { ///

Information about the fleets.

#[serde(rename = "Fleets")] @@ -659,7 +686,7 @@ pub struct DescribeImageBuildersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeImageBuildersResult { ///

Information about the image builders.

#[serde(rename = "ImageBuilders")] @@ -691,7 +718,7 @@ pub struct DescribeImagePermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeImagePermissionsResult { ///

The name of the private image.

#[serde(rename = "Name")] @@ -732,7 +759,7 @@ pub struct DescribeImagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeImagesResult { ///

Information about the images.

#[serde(rename = "Images")] @@ -771,7 +798,7 @@ pub struct DescribeSessionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSessionsResult { ///

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

#[serde(rename = "NextToken")] @@ -796,7 +823,7 @@ pub struct DescribeStacksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStacksResult { ///

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

#[serde(rename = "NextToken")] @@ -821,7 +848,7 @@ pub struct DescribeUsageReportSubscriptionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUsageReportSubscriptionsResult { ///

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

#[serde(rename = "NextToken")] @@ -858,7 +885,7 @@ pub struct DescribeUserStackAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserStackAssociationsResult { ///

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

#[serde(rename = "NextToken")] @@ -886,7 +913,7 @@ pub struct DescribeUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUsersResult { ///

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

#[serde(rename = "NextToken")] @@ -900,7 +927,7 @@ pub struct DescribeUsersResult { ///

Describes the configuration information required to join fleets and image builders to Microsoft Active Directory domains.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectoryConfig { ///

The time the directory configuration was created.

#[serde(rename = "CreatedTime")] @@ -930,7 +957,7 @@ pub struct DisableUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableUserResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -944,7 +971,7 @@ pub struct DisassociateFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateFleetResult {} ///

Describes the configuration information required to join fleets and image builders to Microsoft Active Directory domains.

@@ -971,7 +998,7 @@ pub struct EnableUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableUserResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -982,14 +1009,14 @@ pub struct ExpireSessionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExpireSessionResult {} -///

Describes the parameters for a fleet.

+///

Describes a fleet.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Fleet {
-    /// <p>The ARN for the fleet.</p>
+    /// <p>The Amazon Resource Name (ARN) for the fleet.</p>
     #[serde(rename = "Arn")]
     pub arn: String,
     /// <p>The capacity status for the fleet.</p>
@@ -1027,7 +1054,11 @@ pub struct Fleet {
     #[serde(rename = "FleetType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub fleet_type: Option<String>,
-    /// <p>The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.</p> <p>To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 900.</p> <p>If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.</p>
+    /// <p>The ARN of the IAM role that is applied to the fleet. To assume a role, the fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.</p>
+    #[serde(rename = "IamRoleArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub iam_role_arn: Option<String>,
+    /// <p>The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.</p> <p>To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.</p> <p>If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.</p>
     #[serde(rename = "IdleDisconnectTimeoutInSeconds")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub idle_disconnect_timeout_in_seconds: Option<i64>,
@@ -1060,7 +1091,7 @@ pub struct Fleet {
 /// <p>Describes a fleet error.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct FleetError {
     /// <p>The error code.</p>
     #[serde(rename = "ErrorCode")]
@@ -1074,7 +1105,7 @@ pub struct FleetError {
 /// <p>Describes an image.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Image {
     /// <p>The applications associated with the image.</p>
#[serde(rename = "Applications")] @@ -1104,6 +1135,10 @@ pub struct Image { #[serde(rename = "DisplayName")] #[serde(skip_serializing_if = "Option::is_none")] pub display_name: Option, + ///

The name of the image builder that was used to create the private image. If the image is shared, this value is null.

+ #[serde(rename = "ImageBuilderName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_builder_name: Option, ///

Indicates whether an image builder can be launched from this image.

#[serde(rename = "ImageBuilderSupported")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1139,8 +1174,12 @@ pub struct Image { ///

Describes a virtual machine that is used to create an image.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageBuilder { + ///

The list of virtual private cloud (VPC) interface endpoint objects. Administrators can connect to the image builder only through the specified endpoints.

+ #[serde(rename = "AccessEndpoints")] + #[serde(skip_serializing_if = "Option::is_none")] + pub access_endpoints: Option>, ///

The version of the AppStream 2.0 agent that is currently being used by the image builder.

#[serde(rename = "AppstreamAgentVersion")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1169,6 +1208,10 @@ pub struct ImageBuilder { #[serde(rename = "EnableDefaultInternetAccess")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_default_internet_access: Option, + ///

The ARN of the IAM role that is applied to the image builder. To assume a role, the image builder calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

+ #[serde(rename = "IamRoleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub iam_role_arn: Option, ///

The ARN of the image from which this builder was created.

#[serde(rename = "ImageArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1184,6 +1227,9 @@ pub struct ImageBuilder { ///

The name of the image builder.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "NetworkAccessConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub network_access_configuration: Option, ///

The operating system platform of the image builder.

#[serde(rename = "Platform")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1204,7 +1250,7 @@ pub struct ImageBuilder { ///

Describes the reason why the last image builder state change occurred.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageBuilderStateChangeReason { ///

The state change reason code.

#[serde(rename = "Code")] @@ -1231,7 +1277,7 @@ pub struct ImagePermissions { ///

Describes the reason why the last image state change occurred.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageStateChangeReason { ///

The state change reason code.

#[serde(rename = "Code")] @@ -1245,7 +1291,7 @@ pub struct ImageStateChangeReason { ///

Describes the error that is returned when a usage report can't be generated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LastReportGenerationExecutionError { ///

The error code for the error that is returned when a usage report can't be generated.

#[serde(rename = "ErrorCode")] @@ -1269,7 +1315,7 @@ pub struct ListAssociatedFleetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssociatedFleetsResult { ///

The name of the fleet.

#[serde(rename = "Names")] @@ -1293,7 +1339,7 @@ pub struct ListAssociatedStacksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssociatedStacksResult { ///

The name of the stack.

#[serde(rename = "Names")] @@ -1313,7 +1359,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The information about the tags.

#[serde(rename = "Tags")] @@ -1321,9 +1367,9 @@ pub struct ListTagsForResourceResponse { pub tags: Option<::std::collections::HashMap>, } -///
-/// <p>Describes the network details of the fleet instance for the streaming session.</p>
+/// <p>Describes the network details of the fleet or image builder instance.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NetworkAccessConfiguration {
     /// <p>The resource identifier of the elastic network interface that is attached to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource identifier.</p>
#[serde(rename = "EniId")] @@ -1337,7 +1383,7 @@ pub struct NetworkAccessConfiguration { ///

Describes a resource error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceError { ///

The error code.

#[serde(rename = "ErrorCode")] @@ -1366,7 +1412,7 @@ pub struct ServiceAccountCredentials { ///

Describes a streaming session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Session { ///

The authentication method. The user is authenticated using a streaming URL (API) or SAML 2.0 federation (SAML).

#[serde(rename = "AuthenticationType")] @@ -1407,7 +1453,7 @@ pub struct Session { ///

Describes the permissions that are available to the specified AWS account for a shared image.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SharedImagePermissions { ///

Describes the permissions for a shared image.

#[serde(rename = "imagePermissions")] @@ -1419,8 +1465,12 @@ pub struct SharedImagePermissions { ///

Describes a stack.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Stack {
+    /// <p>The list of virtual private cloud (VPC) interface endpoint objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.</p>
+    #[serde(rename = "AccessEndpoints")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub access_endpoints: Option<Vec<AccessEndpoint>>,
     /// <p>The persistent application settings for users of the stack.</p>
#[serde(rename = "ApplicationSettings")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1468,7 +1518,7 @@ pub struct Stack { ///

Describes a stack error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StackError { ///

The error code.

#[serde(rename = "ErrorCode")] @@ -1488,7 +1538,7 @@ pub struct StartFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartFleetResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1503,7 +1553,7 @@ pub struct StartImageBuilderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartImageBuilderResult { ///

Information about the image builder.

#[serde(rename = "ImageBuilder")] @@ -1519,7 +1569,7 @@ pub struct StopFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopFleetResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1530,7 +1580,7 @@ pub struct StopImageBuilderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopImageBuilderResult { ///

Information about the image builder.

#[serde(rename = "ImageBuilder")] @@ -1565,7 +1615,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1579,7 +1629,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1598,7 +1648,7 @@ pub struct UpdateDirectoryConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDirectoryConfigResult { ///

Information about the Directory Config object.

#[serde(rename = "DirectoryConfig")] @@ -1636,7 +1686,11 @@ pub struct UpdateFleetRequest { #[serde(rename = "EnableDefaultInternetAccess")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_default_internet_access: Option, - ///

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 900.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

+ ///

The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

+ #[serde(rename = "IamRoleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub iam_role_arn: Option, + ///

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

#[serde(rename = "IdleDisconnectTimeoutInSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub idle_disconnect_timeout_in_seconds: Option, @@ -1667,7 +1721,7 @@ pub struct UpdateFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFleetResult { ///

Information about the fleet.

#[serde(rename = "Fleet")] @@ -1689,11 +1743,15 @@ pub struct UpdateImagePermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateImagePermissionsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateStackRequest { + ///

The list of interface VPC endpoint (interface endpoint) objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.

+ #[serde(rename = "AccessEndpoints")] + #[serde(skip_serializing_if = "Option::is_none")] + pub access_endpoints: Option>, ///

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

#[serde(rename = "ApplicationSettings")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1732,7 +1790,7 @@ pub struct UpdateStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateStackResult { ///

Information about the stack.

#[serde(rename = "Stack")] @@ -1742,13 +1800,13 @@ pub struct UpdateStackResult { ///

Describes information about the usage report subscription.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UsageReportSubscription {
///

The time when the last usage report was generated.

#[serde(rename = "LastGeneratedReportDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_generated_report_date: Option, - ///

The Amazon S3 bucket where generated reports are stored. When a usage report subscription is enabled for the first time for an account in an AWS Region, an S3 bucket is created. The bucket is unique to the AWS account and the Region.

+ ///

The Amazon S3 bucket where generated reports are stored.

If you enabled on-instance session scripts and Amazon S3 logging for your session script configuration, AppStream 2.0 created an S3 bucket to store the script output. The bucket is unique to your account and Region. When you enable usage reporting in this case, AppStream 2.0 uses the same bucket to store your usage reports. If you haven't already enabled on-instance session scripts, when you enable usage reports, AppStream 2.0 creates a new S3 bucket.

#[serde(rename = "S3BucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub s3_bucket_name: Option, @@ -1756,7 +1814,7 @@ pub struct UsageReportSubscription { #[serde(rename = "Schedule")] #[serde(skip_serializing_if = "Option::is_none")] pub schedule: Option, - ///

The errors that are returned when usage reports can't be generated.

+ ///

The errors that were returned if usage reports couldn't be generated.

#[serde(rename = "SubscriptionErrors")] #[serde(skip_serializing_if = "Option::is_none")] pub subscription_errors: Option>, @@ -1764,7 +1822,7 @@ pub struct UsageReportSubscription { ///

Describes a user in the user pool.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct User {
///

The ARN of the user.

#[serde(rename = "Arn")] @@ -1830,7 +1888,7 @@ pub struct UserStackAssociation { ///

Describes the error that is returned when a user can’t be associated with or disassociated from a stack.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UserStackAssociationError {
///

The error code for the error that is returned when a user can’t be associated with or disassociated from a stack.

#[serde(rename = "ErrorCode")] @@ -1853,7 +1911,7 @@ pub struct VpcConfig { #[serde(rename = "SecurityGroupIds")] #[serde(skip_serializing_if = "Option::is_none")] pub security_group_ids: Option>, - ///

The identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. Fleet instances use one or two subnets. Image builder instances use one subnet.

+ ///

The identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. Fleet instances use one or more subnets. Image builder instances use one subnet.

#[serde(rename = "SubnetIds")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_ids: Option>, @@ -2468,6 +2526,10 @@ impl Error for CreateStreamingURLError { pub enum CreateUsageReportSubscriptionError { ///

The resource cannot be created because your AWS account is suspended. For assistance, contact AWS Support.

InvalidAccountStatus(String),
+ ///

The specified role is invalid.

+ InvalidRole(String),
+ ///

The requested limit exceeds the permitted limit for an account.

+ LimitExceeded(String),
}
impl CreateUsageReportSubscriptionError {
@@ -2481,6 +2543,16 @@ impl CreateUsageReportSubscriptionError {
            CreateUsageReportSubscriptionError::InvalidAccountStatus(err.msg),
        )
    }
+   "InvalidRoleException" => {
+       return RusotoError::Service(CreateUsageReportSubscriptionError::InvalidRole(
+           err.msg,
+       ))
+   }
+   "LimitExceededException" => {
+       return RusotoError::Service(CreateUsageReportSubscriptionError::LimitExceeded(
+           err.msg,
+       ))
+   }
    "ValidationException" => return RusotoError::Validation(err.msg),
    _ => {}
}
@@ -2497,6 +2569,8 @@ impl Error for CreateUsageReportSubscriptionError {
fn description(&self) -> &str {
    match *self {
        CreateUsageReportSubscriptionError::InvalidAccountStatus(ref cause) => cause,
+       CreateUsageReportSubscriptionError::InvalidRole(ref cause) => cause,
+       CreateUsageReportSubscriptionError::LimitExceeded(ref cause) => cause,
    }
}
}
@@ -3517,10 +3591,14 @@ pub enum StartFleetError {
ConcurrentModification(String),
///

The resource cannot be created because your AWS account is suspended. For assistance, contact AWS Support.

InvalidAccountStatus(String),
+ ///

The specified role is invalid.

+ InvalidRole(String),
///

The requested limit exceeds the permitted limit for an account.

LimitExceeded(String),
///

The attempted operation is not permitted.

OperationNotPermitted(String),
+ ///

The specified resource exists and is not in use, but isn't available.

+ ResourceNotAvailable(String),
///

The specified resource was not found.

ResourceNotFound(String),
}
@@ -3535,12 +3613,18 @@ impl StartFleetError {
    "InvalidAccountStatusException" => {
        return RusotoError::Service(StartFleetError::InvalidAccountStatus(err.msg))
    }
+   "InvalidRoleException" => {
+       return RusotoError::Service(StartFleetError::InvalidRole(err.msg))
+   }
    "LimitExceededException" => {
        return RusotoError::Service(StartFleetError::LimitExceeded(err.msg))
    }
    "OperationNotPermittedException" => {
        return RusotoError::Service(StartFleetError::OperationNotPermitted(err.msg))
    }
+   "ResourceNotAvailableException" => {
+       return RusotoError::Service(StartFleetError::ResourceNotAvailable(err.msg))
+   }
    "ResourceNotFoundException" => {
        return RusotoError::Service(StartFleetError::ResourceNotFound(err.msg))
    }
@@ -3561,8 +3645,10 @@ impl Error for StartFleetError {
    match *self {
        StartFleetError::ConcurrentModification(ref cause) => cause,
        StartFleetError::InvalidAccountStatus(ref cause) => cause,
+       StartFleetError::InvalidRole(ref cause) => cause,
        StartFleetError::LimitExceeded(ref cause) => cause,
        StartFleetError::OperationNotPermitted(ref cause) => cause,
+       StartFleetError::ResourceNotAvailable(ref cause) => cause,
        StartFleetError::ResourceNotFound(ref cause) => cause,
    }
}
@@ -4284,7 +4370,7 @@ pub trait AppStream {
    input: ListAssociatedStacksRequest,
) -> RusotoFuture<ListAssociatedStacksResult, ListAssociatedStacksError>;
- ///
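The new variants surface to callers through RusotoError::Service, so they can be matched directly. A minimal sketch, assuming the rusoto_appstream 0.41 API generated above; the fleet name is a placeholder:

```rust
// Sketch only: matching the InvalidRole and ResourceNotAvailable variants
// added in this diff; the fleet name is a placeholder.
use rusoto_appstream::{AppStream, AppStreamClient, StartFleetError, StartFleetRequest};
use rusoto_core::RusotoError;

fn start_example_fleet(client: &AppStreamClient) {
    let request = StartFleetRequest {
        name: "example-fleet".to_string(),
    };
    match client.start_fleet(request).sync() {
        Ok(_) => println!("fleet is starting"),
        // Service errors are wrapped by the from_response dispatch shown above.
        Err(RusotoError::Service(StartFleetError::InvalidRole(msg))) => {
            eprintln!("fleet IAM role is invalid: {}", msg)
        }
        Err(RusotoError::Service(StartFleetError::ResourceNotAvailable(msg))) => {
            eprintln!("fleet exists but is not available: {}", msg)
        }
        Err(other) => eprintln!("start_fleet failed: {}", other),
    }
}
```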

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn list_tags_for_resource(
    &self,
    input: ListTagsForResourceRequest,
@@ -4311,13 +4397,13 @@ pub trait AppStream {
    input: StopImageBuilderRequest,
) -> RusotoFuture<StopImageBuilderResult, StopImageBuilderError>;
- ///

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn tag_resource(
    &self,
    input: TagResourceRequest,
) -> RusotoFuture<TagResourceResponse, TagResourceError>;
- ///
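A minimal tagging sketch against the trait method above, assuming rusoto_appstream 0.41; the resource ARN and tag values are placeholders:

```rust
// Sketch only: the resource ARN and tag values are placeholders.
use rusoto_appstream::{AppStream, AppStreamClient, TagResourceRequest};
use std::collections::HashMap;

fn tag_fleet(client: &AppStreamClient) {
    let mut tags = HashMap::new();
    // If the key already exists, its value is overwritten, per the docs above.
    tags.insert("team".to_string(), "graphics".to_string());
    let request = TagResourceRequest {
        resource_arn: "arn:aws:appstream:us-east-1:123456789012:fleet/example-fleet".to_string(),
        tags,
    };
    client.tag_resource(request).sync().expect("tag_resource failed");
}
```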

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn untag_resource(
    &self,
    input: UntagResourceRequest,
@@ -4329,7 +4415,7 @@ pub trait AppStream {
    input: UpdateDirectoryConfigRequest,
) -> RusotoFuture<UpdateDirectoryConfigResult, UpdateDirectoryConfigError>;
- ///

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

+ ///

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, IdleDisconnectTimeoutInSeconds, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

fn update_fleet(
    &self,
    input: UpdateFleetRequest,
@@ -4359,10 +4445,7 @@ impl AppStreamClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> AppStreamClient {
-       AppStreamClient {
-           client: Client::shared(),
-           region,
-       }
+       Self::new_with_client(Client::shared(), region)
    }
    pub fn new_with(
@@ -4376,10 +4459,14 @@ impl AppStreamClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-       AppStreamClient {
-           client: Client::new_with(credentials_provider, request_dispatcher),
+       Self::new_with_client(
+           Client::new_with(credentials_provider, request_dispatcher),
            region,
-       }
+       )
+   }
+
+   pub fn new_with_client(client: Client, region: region::Region) -> AppStreamClient {
+       AppStreamClient { client, region }
+   }
}
@@ -5440,7 +5527,7 @@ impl AppStream for AppStreamClient {
    })
}
- ///
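The new_with_client constructor added here makes it possible to reuse one rusoto_core::Client across service clients instead of each client building its own. A sketch under that assumption:

```rust
// Sketch only: reusing one rusoto_core::Client via the new_with_client
// constructor introduced in this diff.
use rusoto_appstream::AppStreamClient;
use rusoto_core::{Client, Region};

fn build_client() -> AppStreamClient {
    // One shared HTTP dispatcher and credentials provider...
    let shared = Client::shared();
    // ...handed to the service client rather than constructed internally.
    AppStreamClient::new_with_client(shared, Region::UsWest2)
}
```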

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn list_tags_for_resource(
    &self,
    input: ListTagsForResourceRequest,
@@ -5583,7 +5670,7 @@ impl AppStream for AppStreamClient {
    })
}
- ///

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn tag_resource(
    &self,
    input: TagResourceRequest,
@@ -5612,7 +5699,7 @@ impl AppStream for AppStreamClient {
    })
}
- ///

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

+ ///

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

fn untag_resource(
    &self,
    input: UntagResourceRequest,
@@ -5672,7 +5759,7 @@ impl AppStream for AppStreamClient {
    })
}
- ///

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

+ ///

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, IdleDisconnectTimeoutInSeconds, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

fn update_fleet(
    &self,
    input: UpdateFleetRequest,
diff --git a/rusoto/services/appstream/src/lib.rs b/rusoto/services/appstream/src/lib.rs
index b9c0b92f26a..1a72114a530 100644
--- a/rusoto/services/appstream/src/lib.rs
+++ b/rusoto/services/appstream/src/lib.rs
@@ -12,7 +12,7 @@
// =================================================================
#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//!

Amazon AppStream 2.0

This is the Amazon AppStream 2.0 API Reference. It provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed application streaming service. You centrally manage your desktop applications on AppStream 2.0 and securely deliver them to any computer. AppStream 2.0 manages the AWS resources required to host and run your applications, scales automatically, and provides access to your users on demand.

To learn more about AppStream 2.0, see the following resources:

+//!

Amazon AppStream 2.0

This is the Amazon AppStream 2.0 API Reference. This documentation provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed, secure application streaming service that lets you stream desktop applications to users without rewriting applications. AppStream 2.0 manages the AWS resources that are required to host and run your applications, scales automatically, and provides access to your users on demand.

You can call the AppStream 2.0 API operations by using an interface VPC endpoint (interface endpoint). For more information, see Access AppStream 2.0 API Operations and CLI Commands Through an Interface VPC Endpoint in the Amazon AppStream 2.0 Administration Guide.

To learn more about AppStream 2.0, see the following resources:

//!
//! If you're using the service, you're probably looking for [AppStreamClient](struct.AppStreamClient.html) and [AppStream](trait.AppStream.html).
diff --git a/rusoto/services/appsync/Cargo.toml b/rusoto/services/appsync/Cargo.toml
index 4554d8fb351..287f753c4e5 100644
--- a/rusoto/services/appsync/Cargo.toml
+++ b/rusoto/services/appsync/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
name = "rusoto_appsync"
readme = "README.md"
repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
homepage = "https://www.rusoto.org/"
edition = "2018"
exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
serde_json = "1.0.1"

[dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
path = "../../core"
default-features = false

[dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
path = "../../../mock"
+default-features = false

[features]
default = ["native-tls"]
native-tls = ["rusoto_core/native-tls"]
rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/appsync/README.md b/rusoto/services/appsync/README.md
index 8a156611d15..22f5481b7bf 100644
--- a/rusoto/services/appsync/README.md
+++ b/rusoto/services/appsync/README.md
@@ -23,9 +23,16 @@ To use `rusoto_appsync` in your application, add it as a dependency in your `Car
```toml
[dependencies]
-rusoto_appsync = "0.40.0"
+rusoto_appsync = "0.41.0"
```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
## Contributing

See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/appsync/src/custom/mod.rs b/rusoto/services/appsync/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/appsync/src/custom/mod.rs
+++ b/rusoto/services/appsync/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/appsync/src/generated.rs b/rusoto/services/appsync/src/generated.rs
index dcb67759249..c41250f6cc8 100644
--- a/rusoto/services/appsync/src/generated.rs
+++ b/rusoto/services/appsync/src/generated.rs
@@ -9,17 +9,16 @@
// must be updated to generate the changes.
//
// =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
use futures::future;
use futures::Future;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::proto;
@@ -44,7 +43,7 @@ pub struct AdditionalAuthenticationProvider {
///
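The serialize_structs feature documented in the README hunk above gates derive(Serialize) on output structs. A sketch of what that enables, assuming a consumer Cargo.toml entry like rusoto_appsync = { version = "0.41", features = ["serialize_structs"] } and serde_json available:

```rust
// Sketch only: assumes the consumer enables the feature, e.g.
//   rusoto_appsync = { version = "0.41", features = ["serialize_structs"] }
// With it, output structs carry derive(Serialize) outside of test builds.
use rusoto_appsync::ListGraphqlApisResponse;

fn to_json(response: &ListGraphqlApisResponse) -> serde_json::Result<String> {
    // Only compiles with the feature enabled, since the derive is behind cfg_attr.
    serde_json::to_string_pretty(response)
}
```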

Describes an API key.

Customers invoke AWS AppSync GraphQL API operations with API keys as an identity mechanism. There are two key versions:

da1: This version was introduced at launch in November 2017. These keys always expire after 7 days. Key expiration is managed by Amazon DynamoDB TTL. The keys ceased to be valid after February 21, 2018 and should not be used after that date.

  • ListApiKeys returns the expiration time in milliseconds.

  • CreateApiKey returns the expiration time in milliseconds.

  • UpdateApiKey is not available for this key version.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as milliseconds. This results in a bug where keys are not automatically deleted because DynamoDB expects the TTL to be stored in seconds. As a one-time action, we will delete these keys from the table after February 21, 2018.

da2: This version was introduced in February 2018 when AppSync added support to extend key expiration.

  • ListApiKeys returns the expiration time in seconds.

  • CreateApiKey returns the expiration time in seconds and accepts a user-provided expiration time in seconds.

  • UpdateApiKey returns the expiration time in seconds and accepts a user-provided expiration time in seconds. Key expiration can only be updated while the key has not expired.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as seconds.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ApiKey {
///

A description of the purpose of the API key.

#[serde(rename = "description")] @@ -116,7 +115,7 @@ pub struct CreateApiKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApiKeyResponse { ///

The API key.

#[serde(rename = "apiKey")] @@ -166,7 +165,7 @@ pub struct CreateDataSourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDataSourceResponse { ///

The DataSource object.

#[serde(rename = "dataSource")] @@ -202,7 +201,7 @@ pub struct CreateFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFunctionResponse { ///

The Function object.

#[serde(rename = "functionConfiguration")] @@ -241,7 +240,7 @@ pub struct CreateGraphqlApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGraphqlApiResponse { ///

The GraphqlApi.

#[serde(rename = "graphqlApi")] @@ -282,7 +281,7 @@ pub struct CreateResolverRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResolverResponse { ///

The Resolver object.

#[serde(rename = "resolver")] @@ -304,7 +303,7 @@ pub struct CreateTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTypeResponse { ///

The Type object.

#[serde(rename = "type")] @@ -314,7 +313,7 @@ pub struct CreateTypeResponse { ///

Describes a data source.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DataSource { ///

The data source ARN.

#[serde(rename = "dataSourceArn")] @@ -369,7 +368,7 @@ pub struct DeleteApiKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApiKeyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -383,7 +382,7 @@ pub struct DeleteDataSourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDataSourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -397,7 +396,7 @@ pub struct DeleteFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFunctionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -408,7 +407,7 @@ pub struct DeleteGraphqlApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGraphqlApiResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -425,7 +424,7 @@ pub struct DeleteResolverRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResolverResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -439,7 +438,7 @@ pub struct DeleteTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTypeResponse {} ///

Describes an Amazon DynamoDB data source configuration.

@@ -470,7 +469,7 @@ pub struct ElasticsearchDataSourceConfig { ///

A function is a reusable entity. Multiple functions can be used to compose the resolver logic.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FunctionConfiguration { ///

The name of the DataSource.

#[serde(rename = "dataSourceName")] @@ -517,7 +516,7 @@ pub struct GetDataSourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataSourceResponse { ///

The DataSource object.

#[serde(rename = "dataSource")] @@ -536,7 +535,7 @@ pub struct GetFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFunctionResponse { ///

The Function object.

#[serde(rename = "functionConfiguration")] @@ -552,7 +551,7 @@ pub struct GetGraphqlApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGraphqlApiResponse { ///

The GraphqlApi object.

#[serde(rename = "graphqlApi")] @@ -594,7 +593,7 @@ pub struct GetResolverRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResolverResponse { ///

The Resolver object.

#[serde(rename = "resolver")] @@ -610,7 +609,7 @@ pub struct GetSchemaCreationStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSchemaCreationStatusResponse { ///

Detailed information about the status of the schema creation operation.

#[serde(rename = "details")] @@ -636,7 +635,7 @@ pub struct GetTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTypeResponse { ///

The Type object.

#[serde(rename = "type")] @@ -646,7 +645,7 @@ pub struct GetTypeResponse { ///

Describes a GraphQL API.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GraphqlApi { ///

A list of additional authentication providers for the GraphqlApi API.

#[serde(rename = "additionalAuthenticationProviders")] @@ -727,7 +726,7 @@ pub struct ListApiKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListApiKeysResponse { ///

The ApiKey objects.

#[serde(rename = "apiKeys")] @@ -755,7 +754,7 @@ pub struct ListDataSourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDataSourcesResponse { ///

The DataSource objects.

#[serde(rename = "dataSources")] @@ -783,7 +782,7 @@ pub struct ListFunctionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFunctionsResponse { ///

A list of Function objects.

#[serde(rename = "functions")] @@ -808,7 +807,7 @@ pub struct ListGraphqlApisRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGraphqlApisResponse { ///

The GraphqlApi objects.

#[serde(rename = "graphqlApis")] @@ -839,7 +838,7 @@ pub struct ListResolversByFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResolversByFunctionResponse { ///

An identifier that can be used to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -870,7 +869,7 @@ pub struct ListResolversRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResolversResponse { ///

An identifier to be passed in the next request to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -890,7 +889,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

A TagMap object.

#[serde(rename = "tags")] @@ -917,7 +916,7 @@ pub struct ListTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTypesResponse { ///

An identifier to be passed in the next request to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -935,6 +934,10 @@ pub struct LogConfig { ///

The service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account.

#[serde(rename = "cloudWatchLogsRoleArn")] pub cloud_watch_logs_role_arn: String, + ///

Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level.

+ #[serde(rename = "excludeVerboseContent")] + #[serde(skip_serializing_if = "Option::is_none")] + pub exclude_verbose_content: Option, ///

The field logging level. Values can be NONE, ERROR, or ALL.

  • NONE: No field-level logs are captured.

  • ERROR: Logs the following information only for the fields that are in error:

    • The error section in the server response.

    • Field-level errors.

    • The generated request/response functions that got resolved for error fields.

  • ALL: The following information is logged for all fields in the query:

    • Field-level tracing information.

    • The generated request/response functions that got resolved for each field.

#[serde(rename = "fieldLogLevel")] pub field_log_level: String, @@ -1009,7 +1012,7 @@ pub struct RelationalDatabaseDataSourceConfig { ///

Describes a resolver.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resolver { ///

The resolver data source name.

#[serde(rename = "dataSourceName")] @@ -1061,7 +1064,7 @@ pub struct StartSchemaCreationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSchemaCreationResponse { ///

The current state of the schema (PROCESSING, FAILED, SUCCESS, or NOT_APPLICABLE). When the schema is in the ACTIVE state, you can add data.

#[serde(rename = "status")] @@ -1080,12 +1083,12 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Describes a type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Type { ///

The type ARN.

#[serde(rename = "arn")] @@ -1120,7 +1123,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1142,7 +1145,7 @@ pub struct UpdateApiKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateApiKeyResponse { ///

The API key.

#[serde(rename = "apiKey")] @@ -1192,7 +1195,7 @@ pub struct UpdateDataSourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDataSourceResponse { ///

The updated DataSource object.

#[serde(rename = "dataSource")] @@ -1231,7 +1234,7 @@ pub struct UpdateFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFunctionResponse { ///

The Function object.

#[serde(rename = "functionConfiguration")] @@ -1270,7 +1273,7 @@ pub struct UpdateGraphqlApiRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGraphqlApiResponse { ///

The updated GraphqlApi object.

#[serde(rename = "graphqlApi")] @@ -1311,7 +1314,7 @@ pub struct UpdateResolverRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResolverResponse { ///

The updated Resolver object.

#[serde(rename = "resolver")] @@ -1337,7 +1340,7 @@ pub struct UpdateTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTypeResponse { ///

The updated Type object.

#[serde(rename = "type")] @@ -3620,10 +3623,7 @@ impl AppSyncClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> AppSyncClient { - AppSyncClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3637,10 +3637,14 @@ impl AppSyncClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - AppSyncClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> AppSyncClient { + AppSyncClient { client, region } } } diff --git a/rusoto/services/athena/Cargo.toml b/rusoto/services/athena/Cargo.toml index f8bed9659be..ab29fccf22c 100644 --- a/rusoto/services/athena/Cargo.toml +++ b/rusoto/services/athena/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_athena" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/athena/README.md b/rusoto/services/athena/README.md index 7b3c7694594..52801212677 100644 --- a/rusoto/services/athena/README.md +++ b/rusoto/services/athena/README.md @@ -23,9 +23,16 @@ To use `rusoto_athena` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_athena = "0.40.0" +rusoto_athena = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/athena/src/custom/mod.rs b/rusoto/services/athena/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/athena/src/custom/mod.rs +++ b/rusoto/services/athena/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/athena/src/generated.rs b/rusoto/services/athena/src/generated.rs index 67ccb583d64..b0aed9a4b29 100644 --- a/rusoto/services/athena/src/generated.rs +++ b/rusoto/services/athena/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct BatchGetNamedQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetNamedQueryOutput { ///

Information about the named query IDs submitted.

#[serde(rename = "NamedQueries")] @@ -52,7 +51,7 @@ pub struct BatchGetQueryExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetQueryExecutionOutput { ///

Information about a query execution.

#[serde(rename = "QueryExecutions")] @@ -66,7 +65,7 @@ pub struct BatchGetQueryExecutionOutput { ///

Information about the columns in a query execution result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ColumnInfo { ///

Indicates whether values in the column are case-sensitive.

#[serde(rename = "CaseSensitive")] @@ -134,7 +133,7 @@ pub struct CreateNamedQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNamedQueryOutput { ///

The unique ID of the query.

#[serde(rename = "NamedQueryId")] @@ -162,12 +161,12 @@ pub struct CreateWorkGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWorkGroupOutput {} ///

A piece of data (a field in the table).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Datum { ///

The value of the datum.

#[serde(rename = "VarCharValue")] @@ -183,7 +182,7 @@ pub struct DeleteNamedQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteNamedQueryOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -198,7 +197,7 @@ pub struct DeleteWorkGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWorkGroupOutput {} ///

If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS) and key information.

@@ -221,7 +220,7 @@ pub struct GetNamedQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetNamedQueryOutput { ///

Information about the query.

#[serde(rename = "NamedQuery")] @@ -237,7 +236,7 @@ pub struct GetQueryExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQueryExecutionOutput { ///

Information about the query execution.

#[serde(rename = "QueryExecution")] @@ -261,7 +260,7 @@ pub struct GetQueryResultsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQueryResultsOutput { ///

A token to be used by the next request if this request is truncated.

#[serde(rename = "NextToken")] @@ -285,7 +284,7 @@ pub struct GetWorkGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetWorkGroupOutput { ///

Information about the workgroup.

#[serde(rename = "WorkGroup")] @@ -310,7 +309,7 @@ pub struct ListNamedQueriesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNamedQueriesOutput { ///

The list of unique query IDs.

#[serde(rename = "NamedQueryIds")] @@ -339,7 +338,7 @@ pub struct ListQueryExecutionsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListQueryExecutionsOutput { ///

A token to be used by the next request if this request is truncated.

#[serde(rename = "NextToken")] @@ -367,7 +366,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceOutput { ///

A token to be used by the next request if this request is truncated.

#[serde(rename = "NextToken")] @@ -392,7 +391,7 @@ pub struct ListWorkGroupsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWorkGroupsOutput { ///

A token to be used by the next request if this request is truncated.

#[serde(rename = "NextToken")] @@ -406,7 +405,7 @@ pub struct ListWorkGroupsOutput { ///

A query, where QueryString is the list of SQL query statements that comprise the query.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NamedQuery { ///

The database to which the query belongs.

#[serde(rename = "Database")] @@ -433,7 +432,7 @@ pub struct NamedQuery { ///

Information about a single instance of a query execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryExecution { ///

The SQL query statements which the query execution ran.

#[serde(rename = "Query")] @@ -455,7 +454,7 @@ pub struct QueryExecution { #[serde(rename = "StatementType")] #[serde(skip_serializing_if = "Option::is_none")] pub statement_type: Option, - ///

The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.

+ ///

The location of a manifest file that tracks file locations generated by the query, the amount of data scanned by the query, and the amount of time that it took the query to run.

#[serde(rename = "Statistics")] #[serde(skip_serializing_if = "Option::is_none")] pub statistics: Option, @@ -478,10 +477,14 @@ pub struct QueryExecutionContext { pub database: Option, } -///

The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.

+///

The location of a manifest file that tracks file locations generated by the query, the amount of data scanned by the query, and the amount of time that it took the query to run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct QueryExecutionStatistics {
+ ///

The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. It tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Output Files in the Amazon Athena User Guide.

+ #[serde(rename = "DataManifestLocation")] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_manifest_location: Option, ///

The number of bytes in the data that was queried.

#[serde(rename = "DataScannedInBytes")] #[serde(skip_serializing_if = "Option::is_none")] @@ -494,7 +497,7 @@ pub struct QueryExecutionStatistics { ///

The completion date, current state, submission time, and state change reason (if applicable) for the query execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryExecutionStatus { ///

The date and time that the query completed.

#[serde(rename = "CompletionDateTime")] @@ -514,14 +517,14 @@ pub struct QueryExecutionStatus { pub submission_date_time: Option, } -///

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. These are known as "client-side settings". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.

+///

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. These are known as "client-side settings". If workgroup settings override client-side settings, then the query uses the workgroup settings.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ResultConfiguration { ///

If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.

#[serde(rename = "EncryptionConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub encryption_configuration: Option, - ///

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

+ ///

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

#[serde(rename = "OutputLocation")] #[serde(skip_serializing_if = "Option::is_none")] pub output_location: Option, @@ -534,7 +537,7 @@ pub struct ResultConfigurationUpdates { #[serde(rename = "EncryptionConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub encryption_configuration: Option, - ///

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

+ ///

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

#[serde(rename = "OutputLocation")] #[serde(skip_serializing_if = "Option::is_none")] pub output_location: Option, @@ -550,7 +553,7 @@ pub struct ResultConfigurationUpdates { ///

The metadata and rows that comprise a query result set. The metadata describes the column structure and data types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultSet { ///

The metadata that describes the column structure and data types of a table of query results.

#[serde(rename = "ResultSetMetadata")] @@ -564,7 +567,7 @@ pub struct ResultSet { ///

The metadata that describes the column structure and data types of a table of query results.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultSetMetadata { ///

Information about the columns returned in a query result metadata.

#[serde(rename = "ColumnInfo")] @@ -574,7 +577,7 @@ pub struct ResultSetMetadata { ///

The rows that comprise a query result table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Row { ///

The data that populates a row in a query result table.

#[serde(rename = "Data")] @@ -606,7 +609,7 @@ pub struct StartQueryExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartQueryExecutionOutput { ///

The unique ID of the query that ran as a result of this request.

#[serde(rename = "QueryExecutionId")] @@ -622,7 +625,7 @@ pub struct StopQueryExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopQueryExecutionOutput {} ///

A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.

@@ -649,12 +652,12 @@ pub struct TagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceOutput {} ///

Information about a named query ID that could not be processed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedNamedQueryId { ///

The error code returned when the processing request for the named query failed, if applicable.

#[serde(rename = "ErrorCode")] @@ -672,7 +675,7 @@ pub struct UnprocessedNamedQueryId { ///

Describes a query execution that failed to process.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedQueryExecutionId { ///

The error code returned when the query execution failed to process, if applicable.

#[serde(rename = "ErrorCode")] @@ -699,7 +702,7 @@ pub struct UntagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -722,14 +725,14 @@ pub struct UpdateWorkGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateWorkGroupOutput {} ///

 /// <p>A workgroup, which contains a name, description, creation time, state, and other configuration, listed under WorkGroup$Configuration. Each workgroup enables you to isolate queries for you or your group of users from other queries in the same account, to configure the query results location and the encryption configuration (known as workgroup settings), to enable sending query metrics to Amazon CloudWatch, and to establish per-query data usage control limits for all queries in a workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct WorkGroup {
-    /// <p>The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.</p>
+    /// <p>The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.</p>
     #[serde(rename = "Configuration")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub configuration: Option<WorkGroupConfiguration>,
@@ -750,7 +753,7 @@ pub struct WorkGroup {
     pub state: Option<String>,
 }

-/// <p>The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.</p>
+/// <p>The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct WorkGroupConfiguration {
     /// <p>The upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.</p>
@@ -765,7 +768,11 @@ pub struct WorkGroupConfiguration {
     #[serde(rename = "PublishCloudWatchMetricsEnabled")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub publish_cloud_watch_metrics_enabled: Option<bool>,
-    /// <p>The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results.</p>
+    /// <p>If set to true, allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.</p>
+    #[serde(rename = "RequesterPaysEnabled")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub requester_pays_enabled: Option<bool>,
+    /// <p>The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results.</p>
     #[serde(rename = "ResultConfiguration")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub result_configuration: Option<ResultConfiguration>,

@@ -790,6 +797,10 @@ pub struct WorkGroupConfigurationUpdates {
     #[serde(rename = "RemoveBytesScannedCutoffPerQuery")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub remove_bytes_scanned_cutoff_per_query: Option<bool>,
+    /// <p>If set to true, allows members assigned to a workgroup to specify Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.</p>
+    #[serde(rename = "RequesterPaysEnabled")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub requester_pays_enabled: Option<bool>,
     /// <p>The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.</p>
     #[serde(rename = "ResultConfigurationUpdates")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -798,7 +809,7 @@ pub struct WorkGroupConfigurationUpdates {
 /// <p>The summary information for the workgroup, which includes its name, state, description, and the date and time it was created.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct WorkGroupSummary {
     /// <p>The workgroup creation date and time.</p>
     #[serde(rename = "CreationTime")]
@@ -1639,7 +1650,7 @@ pub trait Athena {
         input: GetQueryExecutionInput,
     ) -> RusotoFuture<GetQueryExecutionOutput, GetQueryExecutionError>;

-    /// <p>Returns the results of a single query execution specified by QueryExecutionId if you have access to the workgroup in which the query ran. This request does not execute the query but returns results. Use StartQueryExecution to run a query.</p>
+    /// <p>Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.</p> <p>To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.</p> <p>IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.</p>
     fn get_query_results(
         &self,
         input: GetQueryResultsInput,
@@ -1717,10 +1728,7 @@ impl AthenaClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> AthenaClient {
-        AthenaClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -1734,10 +1742,14 @@ impl AthenaClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        AthenaClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AthenaClient {
+        AthenaClient { client, region }
     }
 }
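The refactor above funnels both constructors through the new `new_with_client`, so a prebuilt `rusoto_core::Client` can be shared across service clients. A sketch combining it with the reworked `GetQueryResults` call (the query execution ID is hypothetical, error handling is collapsed to `expect`, and valid AWS credentials are assumed at runtime):

```rust
use rusoto_athena::{Athena, AthenaClient, GetQueryResultsInput};
use rusoto_core::{Client, Region};

fn main() {
    // Reuse the process-wide shared HTTP client and credential stack.
    let athena = AthenaClient::new_with_client(Client::shared(), Region::UsEast1);

    let input = GetQueryResultsInput {
        query_execution_id: "11111111-2222-3333-4444-555555555555".to_string(),
        max_results: Some(10),
        next_token: None,
    };
    // RusotoFuture can be resolved synchronously for a quick check.
    let output = athena.get_query_results(input).sync().expect("query results");
    println!("{:?}", output.result_set);
}
```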

@@ -1973,7 +1985,7 @@ impl Athena for AthenaClient {
         })
     }
 
-    /// <p>Returns the results of a single query execution specified by QueryExecutionId if you have access to the workgroup in which the query ran. This request does not execute the query but returns results. Use StartQueryExecution to run a query.</p>
+    /// <p>Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.</p> <p>To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.</p> <p>IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.</p>
     fn get_query_results(
         &self,
         input: GetQueryResultsInput,
diff --git a/rusoto/services/autoscaling-plans/Cargo.toml b/rusoto/services/autoscaling-plans/Cargo.toml
index 7a4ff0528e8..4098c3709d5 100644
--- a/rusoto/services/autoscaling-plans/Cargo.toml
+++ b/rusoto/services/autoscaling-plans/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_autoscaling_plans"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/autoscaling-plans/README.md b/rusoto/services/autoscaling-plans/README.md
index 0c2f4976391..5c897458823 100644
--- a/rusoto/services/autoscaling-plans/README.md
+++ b/rusoto/services/autoscaling-plans/README.md
@@ -23,9 +23,16 @@ To use `rusoto_autoscaling_plans` in your application, add it as a dependency in
 ```toml
 [dependencies]
-rusoto_autoscaling_plans = "0.40.0"
+rusoto_autoscaling_plans = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/autoscaling-plans/src/custom/mod.rs b/rusoto/services/autoscaling-plans/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/autoscaling-plans/src/custom/mod.rs
+++ b/rusoto/services/autoscaling-plans/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/autoscaling-plans/src/generated.rs b/rusoto/services/autoscaling-plans/src/generated.rs
index 2026cd1b754..ebafad14b3d 100644
--- a/rusoto/services/autoscaling-plans/src/generated.rs
+++ b/rusoto/services/autoscaling-plans/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -51,7 +50,7 @@ pub struct CreateScalingPlanRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateScalingPlanResponse {
     /// <p>The version number of the scaling plan. This value is always 1.</p> <p>Currently, you cannot specify multiple scaling plan versions.</p>
     #[serde(rename = "ScalingPlanVersion")]
@@ -104,7 +103,7 @@ pub struct CustomizedScalingMetricSpecification {
 /// <p>Represents a single value in the forecast data used for predictive scaling.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Datapoint {
     /// <p>The time stamp for the data point in UTC format.</p>
     #[serde(rename = "Timestamp")]
@@ -127,7 +126,7 @@ pub struct DeleteScalingPlanRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteScalingPlanResponse {}
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -149,7 +148,7 @@ pub struct DescribeScalingPlanResourcesRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeScalingPlanResourcesResponse {
     /// <p>The token required to get the next set of results. This value is null if there are no more results to return.</p>
     #[serde(rename = "NextToken")]
@@ -186,7 +185,7 @@ pub struct DescribeScalingPlansRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeScalingPlansResponse {
     /// <p>The token required to get the next set of results. This value is null if there are no more results to return.</p>
     #[serde(rename = "NextToken")]
@@ -227,7 +226,7 @@ pub struct GetScalingPlanResourceForecastDataRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetScalingPlanResourceForecastDataResponse {
     /// <p>The data points to return.</p>
     #[serde(rename = "Datapoints")]
@@ -326,7 +325,7 @@ pub struct ScalingInstruction {
 /// <p>Represents a scaling plan.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ScalingPlan {
     /// <p>The application source.</p>
     #[serde(rename = "ApplicationSource")]
@@ -359,7 +358,7 @@ pub struct ScalingPlan {
 /// <p>Represents a scalable resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ScalingPlanResource {
     /// <p>The ID of the resource. This string consists of the resource type and unique identifier.</p> <ul> <li><p>Auto Scaling group - The resource type is autoScalingGroup and the unique identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg.</p></li> <li><p>ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.</p></li> <li><p>Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.</p></li> <li><p>DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table.</p></li> <li><p>DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index.</p></li> <li><p>Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.</p></li> </ul>
     #[serde(rename = "ResourceId")]
@@ -391,7 +390,7 @@ pub struct ScalingPlanResource {
 /// <p>Represents a scaling policy.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ScalingPolicy {
     /// <p>The name of the scaling policy.</p>
     #[serde(rename = "PolicyName")]
@@ -469,7 +468,7 @@ pub struct UpdateScalingPlanRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateScalingPlanResponse {}
 
 /// Errors returned by CreateScalingPlan
@@ -801,10 +800,7 @@ impl AutoscalingPlansClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> AutoscalingPlansClient {
-        AutoscalingPlansClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -818,10 +814,14 @@ impl AutoscalingPlansClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        AutoscalingPlansClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AutoscalingPlansClient {
+        AutoscalingPlansClient { client, region }
     }
 }
diff --git a/rusoto/services/autoscaling/Cargo.toml b/rusoto/services/autoscaling/Cargo.toml
index 9062c871fe2..d374ca1df22 100644
--- a/rusoto/services/autoscaling/Cargo.toml
+++ b/rusoto/services/autoscaling/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_autoscaling"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -17,18 +17,20 @@ exclude = ["test_resources/*"]
 [dependencies]
 bytes = "0.4.12"
 futures = "0.1.16"
-serde_urlencoded = "0.5"
-xml-rs = "0.7"
+serde_urlencoded = "0.6"
+xml-rs = "0.8"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/autoscaling/README.md b/rusoto/services/autoscaling/README.md
index 377b087c911..37015f72dcc 100644
--- a/rusoto/services/autoscaling/README.md
+++ b/rusoto/services/autoscaling/README.md
@@ -23,9 +23,16 @@ To use `rusoto_autoscaling` in your application, add it as a dependency in your
 ```toml
 [dependencies]
-rusoto_autoscaling = "0.40.0"
+rusoto_autoscaling = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/autoscaling/src/custom/mod.rs b/rusoto/services/autoscaling/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/autoscaling/src/custom/mod.rs
+++ b/rusoto/services/autoscaling/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/autoscaling/src/generated.rs b/rusoto/services/autoscaling/src/generated.rs
index cc5852a6acc..70c619c25d3 100644
--- a/rusoto/services/autoscaling/src/generated.rs
+++ b/rusoto/services/autoscaling/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
@@ -1362,33 +1361,33 @@ impl CooldownDeserializer {
 }
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct CreateAutoScalingGroupType {
-    /// <p>The name of the Auto Scaling group. This name must be unique within the scope of your AWS account.</p>
+    /// <p>The name of the Auto Scaling group. This name must be unique per Region per account.</p>
     pub auto_scaling_group_name: String,
     /// <p>One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets for VPCZoneIdentifier.</p> <p>Conditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into EC2-Classic.</p>
     pub availability_zones: Option<Vec<String>>,
     /// <p>The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.</p> <p>For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.</p>
     pub default_cooldown: Option<i64>,
-    /// <p>The number of EC2 instances that should be running in the group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.</p>
+    /// <p>The number of Amazon EC2 instances that the Auto Scaling group attempts to maintain. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.</p>
     pub desired_capacity: Option<i64>,
-    /// <p>The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0.</p> <p>For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.</p> <p>Conditional: This parameter is required if you are adding an ELB health check.</p>
+    /// <p>The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0.</p> <p>For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.</p> <p>Conditional: This parameter is required if you are adding an ELB health check.</p>
     pub health_check_grace_period: Option<i64>,
     /// <p>The service to use for the health checks. The valid values are EC2 and ELB. The default value is EC2. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.</p> <p>For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.</p>
     pub health_check_type: Option<String>,
-    /// <p>The ID of the instance used to create a launch configuration for the group. This parameter, a launch configuration, a launch template, or a mixed instances policy must be specified.</p> <p>When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.</p> <p>For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The ID of the instance used to create a launch configuration for the group.</p> <p>When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.</p> <p>For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.</p> <p>You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.</p>
     pub instance_id: Option<String>,
-    /// <p>The name of the launch configuration. This parameter, a launch template, a mixed instances policy, or an EC2 instance must be specified.</p> <p>For more information, see Creating an Auto Scaling Group Using a Launch Configuration in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The name of the launch configuration.</p> <p>If you do not specify LaunchConfigurationName, you must specify one of the following parameters: InstanceId, LaunchTemplate, or MixedInstancesPolicy.</p>
    pub launch_configuration_name: Option<String>,
-    /// <p>The launch template to use to launch instances. This parameter, a launch configuration, a mixed instances policy, or an EC2 instance must be specified.</p> <p>For more information, see Creating an Auto Scaling Group Using a Launch Template in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The launch template to use to launch instances.</p> <p>For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.</p> <p>If you do not specify LaunchTemplate, you must specify one of the following parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy.</p>
     pub launch_template: Option<LaunchTemplateSpecification>,
     /// <p>One or more lifecycle hooks.</p>
     pub lifecycle_hook_specification_list: Option<Vec<LifecycleHookSpecification>>,
-    /// <p>One or more Classic Load Balancers. To specify an Application Load Balancer or a Network Load Balancer, use TargetGroupARNs instead.</p> <p>For more information, see Using a Load Balancer With an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers and Network Load Balancers, specify a list of target groups using the TargetGroupARNs property instead.</p> <p>For more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.</p>
     pub load_balancer_names: Option<Vec<String>>,
     /// <p>The maximum size of the group.</p>
     pub max_size: i64,
     /// <p>The minimum size of the group.</p>
     pub min_size: i64,
-    /// <p>The mixed instances policy to use to launch instances. This parameter, a launch template, a launch configuration, or an EC2 instance must be specified.</p> <p>For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.</p> <p>The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity, but also the parameters that specify the instance configuration information—the launch template and instance types.</p> <p>For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.</p> <p>You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.</p>
     pub mixed_instances_policy: Option<MixedInstancesPolicy>,
     /// <p>Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.</p> <p>For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.</p>
     pub new_instances_protected_from_scale_in: Option<bool>,
@@ -1398,7 +1397,7 @@ pub struct CreateAutoScalingGroupType {
     pub service_linked_role_arn: Option<String>,
     /// <p>One or more tags.</p> <p>For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.</p>
     pub tags: Option<Vec<Tag>>,
-    /// <p>The Amazon Resource Names (ARN) of the target groups.</p>
+    /// <p>The Amazon Resource Names (ARN) of the target groups to associate with the Auto Scaling group. Instances are registered as targets in a target group, and traffic is routed to the target group.</p> <p>For more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.</p>
     pub target_group_ar_ns: Option<Vec<String>>,
     /// <p>One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.</p> <p>For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.</p>
     pub termination_policies: Option<Vec<String>>,
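The documentation above stresses that exactly one of LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy must be supplied. A hypothetical request taking the launch-template path (group name, sizes, and template name are placeholders; unset fields fall back to the derived `Default`):

```rust
use rusoto_autoscaling::{CreateAutoScalingGroupType, LaunchTemplateSpecification};

fn main() {
    // Hypothetical group of 1-3 instances built from a launch template.
    let request = CreateAutoScalingGroupType {
        auto_scaling_group_name: "my-asg".to_string(),
        min_size: 1,
        max_size: 3,
        desired_capacity: Some(2),
        launch_template: Some(LaunchTemplateSpecification {
            launch_template_name: Some("my-template".to_string()),
            version: Some("$Latest".to_string()),
            ..Default::default()
        }),
        ..Default::default()
    };
    // Only one launch source is set, per the doc comments above.
    assert!(request.launch_configuration_name.is_none());
}
```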

@@ -1520,41 +1519,41 @@ impl CreateAutoScalingGroupTypeSerializer {
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct CreateLaunchConfigurationType {
-    /// <p>Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>If you specify this parameter, be sure to specify at least one subnet when you create your group.</p> <p>Default: If the instance is launched into a default subnet, the default is to assign a public IP address. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address.</p>
+    /// <p>For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group's instances. If you specify true, each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier when you create your group.</p> <p>If the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.</p>
     pub associate_public_ip_address: Option<bool>,
-    /// <p>One or more mappings that specify how block devices are exposed to the instance. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>A block device mapping, which specifies the block devices for the instance. You can specify virtual devices and EBS volumes. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.</p>
     pub block_device_mappings: Option<Vec<BlockDeviceMapping>>,
-    /// <p>The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>This parameter can only be used if you are launching EC2-Classic instances.</p>
     pub classic_link_vpc_id: Option<String>,
-    /// <p>The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>Conditional: This parameter is required if you specify a ClassicLink-enabled VPC, and is not supported otherwise.</p>
+    /// <p>The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>If you specify the ClassicLinkVPCId parameter, you must specify this parameter.</p>
     pub classic_link_vpc_security_groups: Option<Vec<String>>,
-    /// <p>Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.</p> <p>The default value is false.</p>
     pub ebs_optimized: Option<bool>,
-    /// <p>The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.</p> <p>EC2 instances launched with an IAM role automatically have AWS security credentials available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Use an IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.</p> <p>For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.</p>
     pub iam_instance_profile: Option<String>,
-    /// <p>The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.</p> <p>If you do not specify InstanceId, you must specify ImageId.</p> <p>For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.</p> <p>If you do not specify InstanceId, you must specify ImageId.</p>
     pub image_id: Option<String>,
-    /// <p>The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.</p> <p>If you do not specify InstanceId, you must specify both ImageId and InstanceType.</p> <p>To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.</p> <p>For more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.</p> <p>To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.</p> <p>For more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.</p> <p>If you do not specify InstanceId, you must specify both ImageId and InstanceType.</p>
     pub instance_id: Option<String>,
-    /// <p>Enables detailed monitoring (true) or basic monitoring (false) for the Auto Scaling instances. The default value is true.</p>
+    /// <p>Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.</p> <p>The default value is true (enabled).</p> <p>When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.</p>
     pub instance_monitoring: Option<InstanceMonitoring>,
-    /// <p>The instance type of the EC2 instance.</p> <p>If you do not specify InstanceId, you must specify InstanceType.</p> <p>For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>Specifies the instance type of the EC2 instance.</p> <p>For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.</p> <p>If you do not specify InstanceId, you must specify InstanceType.</p>
     pub instance_type: Option<String>,
     /// <p>The ID of the kernel associated with the AMI.</p>
     pub kernel_id: Option<String>,
     /// <p>The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.</p>
     pub key_name: Option<String>,
-    /// <p>The name of the launch configuration. This name must be unique within the scope of your AWS account.</p>
+    /// <p>The name of the launch configuration. This name must be unique per Region per account.</p>
     pub launch_configuration_name: String,
-    /// <p>The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.</p> <p>To launch Dedicated Instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.</p> <p>If you specify this parameter, be sure to specify at least one subnet when you create your group.</p> <p>For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.</p> <p>Valid values: default | dedicated</p>
+    /// <p>The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.</p> <p>To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.</p> <p>If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.</p> <p>For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.</p> <p>Valid values: default | dedicated</p>
     pub placement_tenancy: Option<String>,
-    /// <p>The ID of the RAM disk associated with the AMI.</p>
+    /// <p>The ID of the RAM disk to select.</p>
     pub ramdisk_id: Option<String>,
-    /// <p>One or more security groups with which to associate the instances.</p> <p>If your instances are launched in EC2-Classic, you can either specify security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances.</p> <p>If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.</p>
+    /// <p>A list that contains the security groups to assign to the instances in the Auto Scaling group.</p> <p>[EC2-VPC] Specify the security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.</p> <p>[EC2-Classic] Specify either the security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances.</p>
     pub security_groups: Option<Vec<String>>,
-    /// <p>The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.</p> <p>If a Spot price is set, then the Auto Scaling group will only launch instances when the Spot price has been met, regardless of the setting in the Auto Scaling group's DesiredCapacity.</p> <p>When you change your Spot price by creating a new launch configuration, running instances will continue to run as long as the Spot price for those running instances is higher than the current Spot market price.</p>
     pub spot_price: Option<String>,
-    /// <p>The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.</p>
     pub user_data: Option<String>,
 }
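Pulling a few of the documented fields together, a hypothetical launch configuration taking the ImageId + InstanceType path (the AMI ID, instance type, and name below are placeholders):

```rust
use rusoto_autoscaling::CreateLaunchConfigurationType;

fn main() {
    // Hypothetical launch configuration; the name must be unique per
    // Region per account, per the documentation above.
    let request = CreateLaunchConfigurationType {
        launch_configuration_name: "example-lc".to_string(),
        image_id: Some("ami-0123456789abcdef0".to_string()),
        instance_type: Some("t3.micro".to_string()),
        ebs_optimized: Some(false),
        associate_public_ip_address: Some(false),
        ..Default::default()
    };
    // Since ImageId and InstanceType are set, InstanceId stays unset.
    assert!(request.instance_id.is_none());
}
```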

@@ -1917,9 +1916,9 @@ impl DeleteTagsTypeSerializer {
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct DescribeAccountLimitsAnswer {
-    /// <p>The maximum number of groups allowed for your AWS account. The default limit is 200 per region.</p>
+    /// <p>The maximum number of groups allowed for your AWS account. The default limit is 200 per AWS Region.</p>
     pub max_number_of_auto_scaling_groups: Option<i64>,
-    /// <p>The maximum number of launch configurations allowed for your AWS account. The default limit is 200 per region.</p>
+    /// <p>The maximum number of launch configurations allowed for your AWS account. The default limit is 200 per AWS Region.</p>
     pub max_number_of_launch_configurations: Option<i64>,
     /// <p>The current number of groups for your AWS account.</p>
     pub number_of_auto_scaling_groups: Option<i64>,
@@ -2831,15 +2830,15 @@ impl DisableScaleInDeserializer {
 /// <p>Describes an Amazon EBS volume. Used in combination with BlockDeviceMapping.</p>
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct Ebs {
-    /// <p>Indicates whether the volume is deleted on instance termination. The default value is true.</p>
+    /// <p>Indicates whether the volume is deleted on instance termination. For Amazon EC2 Auto Scaling, the default value is true.</p>
     pub delete_on_termination: Option<bool>,
-    /// <p>Specifies whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon EC2 User Guide for Linux Instances.</p>
+    /// <p>Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.</p> <p>If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.</p> <p>Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.</p> <p>For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.</p>
     pub encrypted: Option<bool>,
-    /// <p>The number of I/O operations per second (IOPS) to provision for the volume. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.</p> <p>Conditional: This parameter is required when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)</p>
+    /// <p>The number of I/O operations per second (IOPS) to provision for the volume. The maximum ratio of IOPS to volume size (in GiB) is 50:1. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.</p> <p>Conditional: This parameter is required when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)</p>
     pub iops: Option<i64>,
-    /// <p>The ID of the snapshot. This parameter is optional if you specify a volume size.</p>
+    /// <p>The snapshot ID of the volume to use.</p> <p>Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater than the size of the snapshot.</p>
     pub snapshot_id: Option<String>,
-    /// <p>The volume size, in GiB.</p> <p>Constraints: 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.</p> <p>Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.</p> <p>At least one of VolumeSize or SnapshotId is required.</p>
+    /// <p>The volume size, in Gibibytes (GiB).</p> <p>This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.</p> <p>Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.</p> <p>At least one of VolumeSize or SnapshotId is required.</p>
     pub volume_size: Option<i64>,
     /// <p>The volume type, which can be standard for Magnetic, io1 for Provisioned IOPS SSD, gp2 for General Purpose SSD, st1 for Throughput Optimized HDD, or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.</p> <p>Valid values: standard | io1 | gp2 | st1 | sc1</p>
     pub volume_type: Option<String>,
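A sketch of an encrypted gp2 volume consistent with the constraints documented above (the device name and size are hypothetical; `BlockDeviceMapping` is assumed to be the companion generated struct with a required `device_name` and an optional `ebs` field):

```rust
use rusoto_autoscaling::{BlockDeviceMapping, Ebs};

fn main() {
    // Hypothetical mapping: a 100 GiB encrypted gp2 root volume that is
    // deleted when the instance terminates.
    let mapping = BlockDeviceMapping {
        device_name: "/dev/xvda".to_string(),
        ebs: Some(Ebs {
            volume_size: Some(100),
            volume_type: Some("gp2".to_string()),
            encrypted: Some(true),
            delete_on_termination: Some(true),
            ..Default::default()
        }),
        ..Default::default()
    };
    assert!(mapping.ebs.is_some());
}
```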

@@ -3098,7 +3097,7 @@ pub struct ExecutePolicyType {
     pub auto_scaling_group_name: Option<String>,
     /// <p>The breach threshold for the alarm.</p> <p>Conditional: This parameter is required if the policy type is StepScaling and not supported otherwise.</p>
     pub breach_threshold: Option<f64>,
-    /// <p>Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.</p> <p>This parameter is not supported if the policy type is StepScaling.</p> <p>For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.</p>
+    /// <p>Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.</p> <p>This parameter is not supported if the policy type is StepScaling or TargetTrackingScaling.</p> <p>For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.</p>
     pub honor_cooldown: Option<bool>,
     /// <p>The metric value to compare to BreachThreshold. This enables you to execute a policy of type StepScaling and determine which step adjustment to use. For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.</p> <p>If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.</p> <p>Conditional: This parameter is required if the policy type is StepScaling and not supported otherwise.</p>
     pub metric_value: Option<f64>,
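A hypothetical step-scaling execution matching the conditionals above: BreachThreshold and MetricValue are set (using the 50/59 example from the docs), while HonorCooldown is left unset because it is unsupported for StepScaling (the group and policy names are placeholders):

```rust
use rusoto_autoscaling::ExecutePolicyType;

fn main() {
    // Hypothetical invocation of a step scaling policy.
    let request = ExecutePolicyType {
        auto_scaling_group_name: Some("my-asg".to_string()),
        policy_name: "my-step-policy".to_string(),
        breach_threshold: Some(50.0),
        metric_value: Some(59.0),
        honor_cooldown: None, // not supported for StepScaling
    };
    assert_eq!(request.metric_value, Some(59.0));
}
```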

@@ -3480,18 +3479,18 @@ impl InstancesDeserializer {
         })
     }
 }
-/// <p>Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.</p> <p>The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types.</p>
+/// <p>Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.</p> <p>The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.</p>
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct InstancesDistribution {
     /// <p>Indicates how to allocate instance types to fulfill On-Demand capacity.</p> <p>The only valid value is prioritized, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.</p>
     pub on_demand_allocation_strategy: Option<String>,
     /// <p>The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.</p> <p>The default value is 0. If you leave this parameter set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.</p>
     pub on_demand_base_capacity: Option<i64>,
-    /// <p>Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.</p> <p>The range is 0–100. The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.</p>
+    /// <p>Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. The range is 0–100.</p> <p>The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.</p>
     pub on_demand_percentage_above_base_capacity: Option<i64>,
-    /// <p>Indicates how to allocate Spot capacity across Spot pools.</p> <p>The only valid value is lowest-price, which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.</p>
+    /// <p>Indicates how to allocate instances across Spot Instance pools.</p> <p>If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.</p> <p>The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price. The default Spot allocation strategy for the AWS Management Console is capacity-optimized.</p> <p>Valid values: lowest-price | capacity-optimized</p>
     pub spot_allocation_strategy: Option<String>,
-    /// <p>The number of Spot pools to use to allocate your Spot capacity. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate.</p> <p>The range is 1–20 and the default is 2.</p>
+    /// <p>The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. The range is 1–20. The default value is 2.</p> <p>Valid only when the Spot allocation strategy is lowest-price.</p>
     pub spot_instance_pools: Option<i64>,
     /// <p>The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.</p> <p>To remove a value that you previously set, include the parameter but leave the value blank.</p>
     pub spot_max_price: Option<String>,
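A hypothetical distribution built from the fields above: one On-Demand base instance, a 70/30 On-Demand/Spot split beyond it, and the lowest-price strategy across two Spot pools (the split is an example, not a recommendation):

```rust
use rusoto_autoscaling::InstancesDistribution;

fn main() {
    // Hypothetical 70/30 On-Demand/Spot split across two Spot pools.
    let distribution = InstancesDistribution {
        on_demand_base_capacity: Some(1),
        on_demand_percentage_above_base_capacity: Some(70),
        spot_allocation_strategy: Some("lowest-price".to_string()),
        spot_instance_pools: Some(2), // only valid with lowest-price
        spot_max_price: None,         // blank: cap at the On-Demand price
        ..Default::default()
    };
    assert_eq!(distribution.spot_instance_pools, Some(2));
}
```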

Describes a launch configuration.

#[derive(Default, Debug, Clone, PartialEq)] pub struct LaunchConfiguration { - ///

[EC2-VPC] Indicates whether to assign a public IP address to each instance.

+ ///

For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

pub associate_public_ip_address: Option, - ///

A block device mapping, which specifies the block devices for the instance.

+ ///

A block device mapping, which specifies the block devices for the instance.

For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.

pub block_device_mappings: Option>, - ///

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter can only be used if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

+ ///

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

pub classic_link_vpc_id: Option, - ///

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

Conditional: This parameter is required if you specify a ClassicLink-enabled VPC, and cannot be used otherwise.

+ ///

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

pub classic_link_vpc_security_groups: Option>, ///

The creation date and time for the launch configuration.

pub created_time: String, - ///

Controls whether the instance is optimized for EBS I/O (true) or not (false).

+ ///

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false).

For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

pub ebs_optimized: Option, - ///

The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

+ ///

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

pub iam_instance_profile: Option, - ///

The ID of the Amazon Machine Image (AMI).

+ ///

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

pub image_id: String, - ///

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

+ ///

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

pub instance_monitoring: Option, - ///

The instance type for the instances.

+ ///

The instance type for the instances.

For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.

pub instance_type: String, ///

The ID of the kernel associated with the AMI.

pub kernel_id: Option, - ///

The name of the key pair.

+ ///

The name of the key pair.

For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.

pub key_name: Option, ///

The Amazon Resource Name (ARN) of the launch configuration.

pub launch_configuration_arn: Option, ///

The name of the launch configuration.

pub launch_configuration_name: String, - ///

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs in an isolated, single-tenant hardware and can only be launched into a VPC.

+ ///

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

pub placement_tenancy: Option, ///

The ID of the RAM disk associated with the AMI.

pub ramdisk_id: Option, - ///

The security groups to associate with the instances.

+ ///

A list that contains the security groups to assign to the instances in the Auto Scaling group.

For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

pub security_groups: Option>, - ///

The price to bid when launching Spot Instances.

+ ///

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price.

For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

pub spot_price: Option<String>, - ///

The user data available to the instances.

+ ///

The Base64-encoded user data to make available to the launched EC2 instances.

For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

pub user_data: Option<String>, } @@ -3885,7 +3884,7 @@ impl LaunchConfigurationsTypeDeserializer { pub struct LaunchTemplate { ///
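Since the service expects UserData as a Base64-encoded String and the generated client passes the field through verbatim, callers typically encode a startup script themselves. A minimal sketch, assuming the common `base64` crate (not part of this file):

fn encode_user_data(script: &str) -> String {
    // The API wants user data Base64-encoded; encode the raw script bytes.
    base64::encode(script.as_bytes())
}

// e.g. user_data: Some(encode_user_data("#!/bin/bash\nyum update -y\n"))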

The launch template to use. You must specify either the launch template ID or launch template name in the request.

pub launch_template_specification: Option<LaunchTemplateSpecification>, - ///

Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type.

You must specify between 2 and 20 overrides.

+ ///

Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You must specify between 2 and 20 overrides.

pub overrides: Option<Vec<LaunchTemplateOverrides>>, } @@ -4083,7 +4082,7 @@ impl LifecycleActionResultDeserializer { Ok(obj) } } -///
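As a sketch of how these two fields fit together (struct shapes follow the definitions above; the template name, version, and instance types are hypothetical values):

let launch_template = LaunchTemplate {
    launch_template_specification: Some(LaunchTemplateSpecification {
        launch_template_name: Some("my-template".to_string()),
        version: Some("$Latest".to_string()),
        ..Default::default()
    }),
    // Between 2 and 20 overrides; instance type is currently the only
    // supported override.
    overrides: Some(vec![
        LaunchTemplateOverrides {
            instance_type: Some("m5.large".to_string()),
            ..Default::default()
        },
        LaunchTemplateOverrides {
            instance_type: Some("c5.large".to_string()),
            ..Default::default()
        },
    ]),
};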

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances. Used in response to DescribeLifecycleHooks.

+///

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or terminates instances. Used in response to DescribeLifecycleHooks.

#[derive(Default, Debug, Clone, PartialEq)] pub struct LifecycleHook { ///

The name of the Auto Scaling group for the lifecycle hook.

@@ -4184,7 +4183,7 @@ impl LifecycleHookNamesSerializer { } } -///

Describes a lifecycle hook. Used in combination with CreateAutoScalingGroup.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. You can modify an existing lifecycle hook or create new lifecycle hooks using PutLifecycleHook. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

+///

Describes a lifecycle hook. Used in combination with CreateAutoScalingGroup.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. You can modify an existing lifecycle hook or create new lifecycle hooks using PutLifecycleHook. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

#[derive(Default, Debug, Clone, PartialEq)] pub struct LifecycleHookSpecification { ///

Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON. The default value is ABANDON.
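For instance, a hedged sketch of a launch-transition hook built from this type; the diff elides the remaining fields, so their names here are assumed from the AWS API shape:

let hook = LifecycleHookSpecification {
    lifecycle_hook_name: "wait-for-bootstrap".to_string(),
    lifecycle_transition: "autoscaling:EC2_INSTANCE_LAUNCHING".to_string(),
    // ABANDON terminates the instance if the hook times out.
    default_result: Some("ABANDON".to_string()),
    heartbeat_timeout: Some(300), // seconds
    ..Default::default()
};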

@@ -4755,12 +4754,12 @@ impl MixedInstanceSpotPriceDeserializer { Ok(obj) } } -///

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. Used in combination with CreateAutoScalingGroup. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

When you create your Auto Scaling group, you can specify a launch configuration or template as a parameter for the top-level object, or you can specify a mixed instances policy, but not both at the same time.

+///

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.

#[derive(Default, Debug, Clone, PartialEq)] pub struct MixedInstancesPolicy { - ///

The instances distribution to use.

If you leave this parameter unspecified when creating the group, the default values are used.

+ ///

The instances distribution to use.

If you leave this parameter unspecified when creating a mixed instances policy, the default values are used.

pub instances_distribution: Option<InstancesDistribution>, - ///

The launch template and overrides.

This parameter is required when creating an Auto Scaling group with a mixed instances policy, but is not required when updating the group.

+ ///

The launch template and instance types (overrides).

This parameter must be specified when creating a mixed instances policy.

pub launch_template: Option<LaunchTemplate>, } @@ -5084,7 +5083,7 @@ impl PolicyTypesSerializer { pub struct PredefinedMetricSpecification { ///
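Tying the two fields together, a hedged sketch (reusing the `launch_template` value sketched earlier; the InstancesDistribution field name is assumed from the AWS API shape, not shown in this diff):

let policy = MixedInstancesPolicy {
    // Must be specified when creating a mixed instances policy (see above).
    launch_template: Some(launch_template),
    instances_distribution: Some(InstancesDistribution {
        // Hypothetical split: 50% On-Demand above base capacity, rest Spot.
        on_demand_percentage_above_base_capacity: Some(50),
        ..Default::default()
    }),
};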

The metric type.

pub predefined_metric_type: String, - ///

Identifies the resource associated with the metric type. The following predefined metrics are available:

  • ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group.

  • ASGAverageNetworkIn - Average number of bytes received on all network interfaces by the Auto Scaling group.

  • ASGAverageNetworkOut - Average number of bytes sent out on all network interfaces by the Auto Scaling group.

  • ALBRequestCountPerTarget - Number of requests completed per target in an Application Load Balancer or a Network Load Balancer target group.

For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, and ASGAverageNetworkOut, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.

+ ///

Identifies the resource associated with the metric type. The following predefined metrics are available:

  • ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group.

  • ASGAverageNetworkIn - Average number of bytes received on all network interfaces by the Auto Scaling group.

  • ASGAverageNetworkOut - Average number of bytes sent out on all network interfaces by the Auto Scaling group.

  • ALBRequestCountPerTarget - Number of requests completed per target in an Application Load Balancer target group.

For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, and ASGAverageNetworkOut, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.

pub resource_label: Option<String>, } @@ -5359,17 +5358,17 @@ impl PutNotificationConfigurationTypeSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct PutScalingPolicyType { - ///
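For example, a sketch of assembling the ALBRequestCountPerTarget label from the two ARN suffixes described above (the ARN values are made up):

let lb = "app/my-alb/50dc6c495c0c9188"; // final portion of the load balancer ARN
let tg = "targetgroup/my-targets/73e2d6bc24d8a067"; // final portion of the target group ARN
let spec = PredefinedMetricSpecification {
    predefined_metric_type: "ALBRequestCountPerTarget".to_string(),
    resource_label: Some(format!("{}/{}", lb, tg)),
};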

The adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

This parameter is supported if the policy type is SimpleScaling or StepScaling.

For more information, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

+ ///

Specifies whether the ScalingAdjustment parameter is an absolute number or a percentage of the current capacity. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Valid only if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

pub adjustment_type: Option<String>, ///

The name of the Auto Scaling group.

pub auto_scaling_group_name: String, - ///

The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.

This parameter is supported if the policy type is SimpleScaling.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

+ ///

The amount of time, in seconds, after a scaling activity completes before any further dynamic scaling activities can start. If this parameter is not specified, the default cooldown period for the group applies.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

pub cooldown: Option<i64>, - ///

The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.

This parameter is supported if the policy type is StepScaling or TargetTrackingScaling.

+ ///

The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.

Valid only if the policy type is StepScaling or TargetTrackingScaling.

pub estimated_instance_warmup: Option<i64>, - ///

The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

This parameter is supported if the policy type is StepScaling.

+ ///

The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

Valid only if the policy type is StepScaling.

pub metric_aggregation_type: Option<String>, - ///

The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

This parameter is supported if the policy type is SimpleScaling or StepScaling.

+ ///

The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

This property replaces the MinAdjustmentStep property. For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.

Valid only if the policy type is SimpleScaling or StepScaling.

pub min_adjustment_magnitude: Option<i64>, ///

Available for backward compatibility. Use MinAdjustmentMagnitude instead.

pub min_adjustment_step: Option<i64>, @@ -5377,11 +5376,11 @@ pub struct PutScalingPolicyType { pub policy_name: String, ///

The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. If the policy type is null, the value is treated as SimpleScaling.

pub policy_type: Option<String>, - ///

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

Conditional: This parameter is required if the policy type is SimpleScaling and not supported otherwise.

+ ///

The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.

Conditional: If you specify SimpleScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

pub scaling_adjustment: Option<i64>, - ///

A set of adjustments that enable you to scale based on the size of the alarm breach.

Conditional: This parameter is required if the policy type is StepScaling and not supported otherwise.

+ ///

A set of adjustments that enable you to scale based on the size of the alarm breach.

Conditional: If you specify StepScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

pub step_adjustments: Option<Vec<StepAdjustment>>, - ///

A target tracking scaling policy. Includes support for predefined or customized metrics.

Conditional: This parameter is required if the policy type is TargetTrackingScaling and not supported otherwise.

+ ///

A target tracking scaling policy. Includes support for predefined or customized metrics.

For more information, see TargetTrackingConfiguration in the Amazon EC2 Auto Scaling API Reference.

Conditional: If you specify TargetTrackingScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

pub target_tracking_configuration: Option<TargetTrackingConfiguration>, } @@ -5453,21 +5452,21 @@ impl PutScalingPolicyTypeSerializer { pub struct PutScheduledUpdateGroupActionType { ///
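A sketch of a target tracking request built from this type. Group and policy names are hypothetical, and whether you drive the returned RusotoFuture with `.sync()` (blocking) or an executor is up to the caller:

fn put_cpu_target_policy(client: &AutoscalingClient) {
    let request = PutScalingPolicyType {
        auto_scaling_group_name: "my-asg".to_string(),
        policy_name: "cpu-at-50".to_string(),
        policy_type: Some("TargetTrackingScaling".to_string()),
        target_tracking_configuration: Some(TargetTrackingConfiguration {
            predefined_metric_specification: Some(PredefinedMetricSpecification {
                // No resource label needed for this predefined metric.
                predefined_metric_type: "ASGAverageCPUUtilization".to_string(),
                resource_label: None,
            }),
            target_value: 50.0,
            ..Default::default()
        }),
        ..Default::default()
    };
    // Blocks until the service responds with the policy ARN or an error.
    let _policy_arn = client.put_scaling_policy(request).sync();
}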

The name of the Auto Scaling group.

pub auto_scaling_group_name: String, - ///

The number of EC2 instances that should be running in the group.

+ ///

The number of EC2 instances that should be running in the Auto Scaling group.

pub desired_capacity: Option<i64>, - ///

The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

+ ///

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

pub end_time: Option<String>, - ///

The maximum size for the Auto Scaling group.

+ ///

The maximum number of instances in the Auto Scaling group.

pub max_size: Option<i64>, - ///

The minimum size for the Auto Scaling group.

+ ///

The minimum number of instances in the Auto Scaling group.

pub min_size: Option<i64>, - ///

The recurring schedule for this action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, "30 0 1 1,6,12 *"). For more information about this format, see Crontab.

+ ///

The recurring schedule for this action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, "30 0 1 1,6,12 *"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

pub recurrence: Option<String>, ///

The name of this scaling action.

pub scheduled_action_name: String, - ///

The time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

+ ///

The date and time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

pub start_time: Option<String>, - ///

This parameter is deprecated.

+ ///

This parameter is no longer used.

pub time: Option<String>, } @@ -5634,7 +5633,7 @@ pub struct ScalingPolicy { pub policy_arn: Option<String>, ///
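A sketch of a recurring scheduled action using the five-field cron format described above (the group name, action name, and sizes are illustrative):

let request = PutScheduledUpdateGroupActionType {
    auto_scaling_group_name: "my-asg".to_string(),
    scheduled_action_name: "weekday-morning-scale-up".to_string(),
    // 08:00 UTC, Monday through Friday.
    recurrence: Some("0 8 * * 1-5".to_string()),
    min_size: Some(2),
    max_size: Some(10),
    desired_capacity: Some(4),
    ..Default::default()
};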

The name of the scaling policy.

pub policy_name: Option, - ///

The policy type. The valid values are SimpleScaling and StepScaling.

+ ///

The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling.

pub policy_type: Option<String>, ///

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

pub scaling_adjustment: Option<i64>, @@ -5821,21 +5820,21 @@ pub struct ScheduledUpdateGroupAction { pub auto_scaling_group_name: Option<String>, ///

The number of instances you prefer to maintain in the group.

pub desired_capacity: Option<i64>, - ///

The date and time that the action is scheduled to end.

+ ///

The date and time in UTC for the recurring schedule to end. For example, "2019-06-01T00:00:00Z".

pub end_time: Option<String>, - ///

The maximum size of the group.

+ ///

The maximum number of instances in the Auto Scaling group.

pub max_size: Option<i64>, - ///

The minimum size of the group.

+ ///

The minimum number of instances in the Auto Scaling group.

pub min_size: Option<i64>, - ///

The recurring schedule for the action.

+ ///

The recurring schedule for the action, in Unix cron syntax format.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

pub recurrence: Option<String>, ///

The Amazon Resource Name (ARN) of the scheduled action.

pub scheduled_action_arn: Option<String>, ///

The name of the scheduled action.

pub scheduled_action_name: Option<String>, - ///

The date and time that the action is scheduled to begin.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

+ ///

The date and time in UTC for this action to start. For example, "2019-06-01T00:00:00Z".

pub start_time: Option<String>, - ///

This parameter is deprecated.

+ ///

This parameter is no longer used.

pub time: Option<String>, } @@ -5917,17 +5916,17 @@ impl ScheduledUpdateGroupActionDeserializer { pub struct ScheduledUpdateGroupActionRequest { ///

The number of EC2 instances that should be running in the group.

pub desired_capacity: Option<i64>, - ///

The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

+ ///

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

pub end_time: Option<String>, - ///

The maximum size of the group.

+ ///

The maximum number of instances in the Auto Scaling group.

pub max_size: Option<i64>, - ///

The minimum size of the group.

+ ///

The minimum number of instances in the Auto Scaling group.

pub min_size: Option<i64>, - ///

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, "30 0 1 1,6,12 *"). For more information about this format, see Crontab.

+ ///

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, "30 0 1 1,6,12 *"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

pub recurrence: Option<String>, ///

The name of the scaling action.

pub scheduled_action_name: String, - ///

The time for the action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.

+ ///

The date and time for the action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.

pub start_time: Option<String>, } @@ -6522,11 +6521,11 @@ impl TargetGroupARNsSerializer { ///

Represents a target tracking scaling policy configuration to use with Amazon EC2 Auto Scaling.

#[derive(Default, Debug, Clone, PartialEq)] pub struct TargetTrackingConfiguration { - ///

A customized metric. You can specify either a predefined metric or a customized metric.

+ ///

A customized metric. You must specify either a predefined metric or a customized metric.

pub customized_metric_specification: Option<CustomizedMetricSpecification>, - ///

Indicates whether scaling in by the target tracking scaling policy is disabled. If scaling in is disabled, the target tracking scaling policy doesn't remove instances from the Auto Scaling group. Otherwise, the target tracking scaling policy can remove instances from the Auto Scaling group. The default is disabled.

+ ///

Indicates whether scaling in by the target tracking scaling policy is disabled. If scaling in is disabled, the target tracking scaling policy doesn't remove instances from the Auto Scaling group. Otherwise, the target tracking scaling policy can remove instances from the Auto Scaling group. The default is false.

pub disable_scale_in: Option<bool>, - ///

A predefined metric. You can specify either a predefined metric or a customized metric.

+ ///

A predefined metric. You must specify either a predefined metric or a customized metric.

pub predefined_metric_specification: Option<PredefinedMetricSpecification>, ///

The target value for the metric.

pub target_value: f64, @@ -6679,23 +6678,23 @@ pub struct UpdateAutoScalingGroupType { pub auto_scaling_group_name: String, ///

One or more Availability Zones for the group.

pub availability_zones: Option<Vec<String>>, - ///

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

+ ///

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300. This cooldown period is not used when a scaling-specific cooldown is specified.

Cooldown periods are not supported for target tracking scaling policies, step scaling policies, or scheduled scaling. For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

pub default_cooldown: Option<i64>, ///

The number of EC2 instances that should be running in the Auto Scaling group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

pub desired_capacity: Option<i64>, - ///

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0.

For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

Conditional: This parameter is required if you are adding an ELB health check.

+ ///

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Conditional: This parameter is required if you are adding an ELB health check.

pub health_check_grace_period: Option<i64>, ///

The service to use for the health checks. The valid values are EC2 and ELB. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

pub health_check_type: Option<String>, - ///

The name of the launch configuration. If you specify this parameter, you can't specify a launch template or a mixed instances policy.

+ ///

The name of the launch configuration. If you specify LaunchConfigurationName in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy.

To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to false, you must first disable the collection of group metrics. Otherwise, you get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

pub launch_configuration_name: Option<String>, - ///

The launch template and version to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a mixed instances policy.

+ ///

The launch template and version to use to specify the updates. If you specify LaunchTemplate in your update request, you can't specify LaunchConfigurationName or MixedInstancesPolicy.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

pub launch_template: Option<LaunchTemplateSpecification>, ///

The maximum size of the Auto Scaling group.

pub max_size: Option<i64>, ///

The minimum size of the Auto Scaling group.

pub min_size: Option<i64>, - ///

The mixed instances policy to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a launch template.

For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

+ ///

An embedded object that specifies a mixed instances policy.

In your call to UpdateAutoScalingGroup, you can make changes to the policy that is specified. All optional parameters are left unchanged if not specified.

For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

pub mixed_instances_policy: Option<MixedInstancesPolicy>, ///

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.

For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

pub new_instances_protected_from_scale_in: Option<bool>, @@ -6705,7 +6704,7 @@ pub struct UpdateAutoScalingGroupType { pub service_linked_role_arn: Option<String>, ///

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.

pub termination_policies: Option<Vec<String>>, - ///

A comma-separated list of subnet IDs, if you are launching into a VPC.

If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

+ ///

A comma-separated list of subnet IDs for virtual private cloud (VPC).

If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

pub vpc_zone_identifier: Option<String>, } @@ -10024,7 +10023,7 @@ pub trait Autoscaling { input: DeleteNotificationConfigurationType, ) -> RusotoFuture<(), DeleteNotificationConfigurationError>; - ///
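Since parameters left unspecified are not changed by an update, a minimal request can touch a single setting. A sketch (group name illustrative; `client` is an AutoscalingClient):

fn raise_max_size(client: &AutoscalingClient) {
    let request = UpdateAutoScalingGroupType {
        auto_scaling_group_name: "my-asg".to_string(),
        max_size: Some(20),
        // Everything left as Default/None stays unchanged on the group.
        ..Default::default()
    };
    let _ = client.update_auto_scaling_group(request).sync();
}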

Deletes the specified scaling policy.

Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

+ ///

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a Scaling Policy in the Amazon EC2 Auto Scaling User Guide.

fn delete_policy(&self, input: DeletePolicyType) -> RusotoFuture<(), DeletePolicyError>; ///

Deletes the specified scheduled action.

@@ -10126,7 +10125,7 @@ pub trait Autoscaling { &self, ) -> RusotoFuture; - ///

Describes the actions scheduled for your Auto Scaling group that haven't run. To describe the actions that have already run, use DescribeScalingActivities.

+ ///

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.

fn describe_scheduled_actions( &self, input: DescribeScheduledActionsType, @@ -10185,7 +10184,7 @@ pub trait Autoscaling { input: ExitStandbyQuery, ) -> RusotoFuture<ExitStandbyAnswer, ExitStandbyError>; - ///

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

+ ///

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

fn put_lifecycle_hook( &self, input: PutLifecycleHookType, @@ -10197,7 +10196,7 @@ pub trait Autoscaling { input: PutNotificationConfigurationType, ) -> RusotoFuture<(), PutNotificationConfigurationError>; - ///

Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

+ ///

Creates or updates a scaling policy for an Auto Scaling group. To update an existing scaling policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

For more information about using scaling policies to scale your Auto Scaling group automatically, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

fn put_scaling_policy( &self, input: PutScalingPolicyType, @@ -10251,7 +10250,7 @@ pub trait Autoscaling { input: TerminateInstanceInAutoScalingGroupType, ) -> RusotoFuture<ActivityType, TerminateInstanceInAutoScalingGroupError>; - ///

Updates the configuration for the specified Auto Scaling group.

The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.

To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to false, you must first disable the collection of group metrics. Otherwise, you get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

Note the following:

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MinSize.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MaxSize.

  • All other optional parameters are left unchanged if not specified.

+ ///

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

  • If a scale-in event occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, this sets the group's DesiredCapacity to the new MinSize value.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.

To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.

fn update_auto_scaling_group( &self, input: UpdateAutoScalingGroupType, @@ -10269,10 +10268,7 @@ impl AutoscalingClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> AutoscalingClient { - AutoscalingClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -10286,10 +10282,14 @@ impl AutoscalingClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - AutoscalingClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> AutoscalingClient { + AutoscalingClient { client, region } } } @@ -10352,7 +10352,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10402,7 +10402,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10450,7 +10450,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10503,7 +10503,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10551,7 +10551,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10729,7 +10729,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10775,7 +10775,7 @@ impl Autoscaling for AutoscalingClient { }) } - ///
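With this refactor, `new` and `new_with` both funnel through the added `new_with_client`, which also lets callers inject an existing `Client`. A sketch of the two equivalent construction paths:

use rusoto_core::{Client, Region};

// Default credentials provider and TLS client.
let via_new = AutoscalingClient::new(Region::UsEast1);
// Equivalent, reusing the shared Client explicitly.
let via_shared = AutoscalingClient::new_with_client(Client::shared(), Region::UsEast1);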

Deletes the specified scaling policy.

Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

+ ///

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a Scaling Policy in the Amazon EC2 Auto Scaling User Guide.

fn delete_policy(&self, input: DeletePolicyType) -> RusotoFuture<(), DeletePolicyError> { let mut request = SignedRequest::new("POST", "autoscaling", &self.region, "/"); let mut params = Params::new(); @@ -10878,7 +10878,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10925,7 +10925,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10973,7 +10973,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11021,7 +11021,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11073,7 +11073,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11121,7 +11121,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11168,7 +11168,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11216,7 +11216,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11267,7 +11267,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11315,7 +11315,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next(); @@ -11362,7 +11362,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11415,7 +11415,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11466,7 +11466,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11514,7 +11514,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11561,7 +11561,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11580,7 +11580,7 @@ impl Autoscaling for AutoscalingClient { }) } - ///

Describes the actions scheduled for your Auto Scaling group that haven't run. To describe the actions that have already run, use DescribeScalingActivities.

+ ///

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.

fn describe_scheduled_actions( &self, input: DescribeScheduledActionsType, @@ -11609,7 +11609,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11657,7 +11657,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11702,7 +11702,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11753,7 +11753,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11802,7 +11802,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11852,7 +11852,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11953,7 +11953,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12029,7 +12029,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12048,7 +12048,7 @@ impl Autoscaling for AutoscalingClient { }) } - ///

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

+ ///

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

fn put_lifecycle_hook( &self, input: PutLifecycleHookType, @@ -12080,7 +12080,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12124,7 +12124,7 @@ impl Autoscaling for AutoscalingClient { }) } - ///

Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

+ ///

Creates or updates a scaling policy for an Auto Scaling group. To update an existing scaling policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

For more information about using scaling policies to scale your Auto Scaling group automatically, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

fn put_scaling_policy( &self, input: PutScalingPolicyType, @@ -12156,7 +12156,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12230,7 +12230,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12362,7 +12362,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12440,7 +12440,7 @@ impl Autoscaling for AutoscalingClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12459,7 +12459,7 @@ impl Autoscaling for AutoscalingClient { }) } - ///

Updates the configuration for the specified Auto Scaling group.

The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.

To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to false, you must first disable the collection of group metrics. Otherwise, you get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

Note the following:

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MinSize.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MaxSize.

  • All other optional parameters are left unchanged if not specified.

+ ///

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

  • If a scale-in event occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, this sets the group's DesiredCapacity to the new MinSize value.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.

To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.
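Because the request struct derives Default, only the parameters you set are serialized into the request, matching the "parameters you don't specify are not changed" semantics described above. A minimal sketch with placeholder sizes, assuming the 0.41 API:

```rust
use rusoto_autoscaling::{Autoscaling, AutoscalingClient, UpdateAutoScalingGroupType};
use rusoto_core::Region;

fn main() {
    let client = AutoscalingClient::new(Region::UsEast1);
    let input = UpdateAutoScalingGroupType {
        auto_scaling_group_name: "my-asg".to_string(),
        // Only these three parameters change; fields left at Default are omitted
        // from the request, so their current values are kept.
        min_size: Some(2),
        max_size: Some(10),
        desired_capacity: Some(4),
        ..Default::default()
    };
    if let Err(e) = client.update_auto_scaling_group(input).sync() {
        eprintln!("update_auto_scaling_group failed: {}", e);
    }
}
```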

fn update_auto_scaling_group( &self, input: UpdateAutoScalingGroupType, diff --git a/rusoto/services/autoscaling/src/lib.rs b/rusoto/services/autoscaling/src/lib.rs index dbb6e257191..1e6f1f41bdd 100644 --- a/rusoto/services/autoscaling/src/lib.rs +++ b/rusoto/services/autoscaling/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.

For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide.

+//!

Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.

For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide.
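As a quick orientation for the crate doc above, a hedged sketch of constructing the client and listing groups with the 0.41 API; the region is a placeholder:

```rust
use rusoto_autoscaling::{AutoScalingGroupNamesType, Autoscaling, AutoscalingClient};
use rusoto_core::Region;

fn main() {
    // Uses the default credentials provider and TLS client.
    let client = AutoscalingClient::new(Region::UsEast1);
    // An empty request describes every group in the account and region.
    let request = AutoScalingGroupNamesType::default();
    match client.describe_auto_scaling_groups(request).sync() {
        Ok(output) => {
            for group in output.auto_scaling_groups {
                println!("{}", group.auto_scaling_group_name);
            }
        }
        Err(e) => eprintln!("describe_auto_scaling_groups failed: {}", e),
    }
}
```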

//! //! If you're using the service, you're probably looking for [AutoscalingClient](struct.AutoscalingClient.html) and [Autoscaling](trait.Autoscaling.html). diff --git a/rusoto/services/batch/Cargo.toml b/rusoto/services/batch/Cargo.toml index f8f37ac264a..3a5a12a298c 100644 --- a/rusoto/services/batch/Cargo.toml +++ b/rusoto/services/batch/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_batch" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/batch/README.md b/rusoto/services/batch/README.md index d35c0ac3753..79b38b42e07 100644 --- a/rusoto/services/batch/README.md +++ b/rusoto/services/batch/README.md @@ -23,9 +23,16 @@ To use `rusoto_batch` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_batch = "0.40.0" +rusoto_batch = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/batch/src/custom/mod.rs b/rusoto/services/batch/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/batch/src/custom/mod.rs +++ b/rusoto/services/batch/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/batch/src/generated.rs b/rusoto/services/batch/src/generated.rs index 26aacbf4087..db6308fad98 100644 --- a/rusoto/services/batch/src/generated.rs +++ b/rusoto/services/batch/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -35,7 +34,7 @@ pub struct ArrayProperties { ///

An object representing the array properties of a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArrayPropertiesDetail { ///

The job index within the array that is associated with this job. This parameter is returned for array job children.

#[serde(rename = "index")] @@ -53,7 +52,7 @@ pub struct ArrayPropertiesDetail { ///

An object representing the array properties of a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArrayPropertiesSummary { ///

The job index within the array that is associated with this job. This parameter is returned for children of array jobs.

#[serde(rename = "index")] @@ -67,7 +66,7 @@ pub struct ArrayPropertiesSummary { ///

An object representing the details of a container that is part of a job attempt.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttemptContainerDetail { ///

The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts the job attempt.

#[serde(rename = "containerInstanceArn")] @@ -97,7 +96,7 @@ pub struct AttemptContainerDetail { ///

An object representing a job attempt.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttemptDetail { ///

Details about the container in this job attempt.

#[serde(rename = "container")] @@ -128,12 +127,12 @@ pub struct CancelJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelJobResponse {} ///

An object representing an AWS Batch compute environment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComputeEnvironmentDetail { ///

The Amazon Resource Name (ARN) of the compute environment.

#[serde(rename = "computeEnvironmentArn")] @@ -228,7 +227,7 @@ pub struct ComputeResource { #[serde(rename = "spotIamFleetRole")] #[serde(skip_serializing_if = "Option::is_none")] pub spot_iam_fleet_role: Option, - ///

The VPC subnets into which the compute resources are launched.

+ ///

The VPC subnets into which the compute resources are launched. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

#[serde(rename = "subnets")] pub subnets: Vec, ///

Key-value pair tags to be applied to resources that are launched in the compute environment. For AWS Batch, these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value—for example, { "Name": "AWS Batch Instance - C4OnDemand" }.

@@ -259,7 +258,7 @@ pub struct ComputeResourceUpdate { ///

An object representing the details of a container that is part of a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContainerDetail { ///

The command that is passed to the container.

#[serde(rename = "command")] @@ -289,6 +288,10 @@ pub struct ContainerDetail { #[serde(rename = "jobRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub job_role_arn: Option, + ///

Linux-specific modifications that are applied to the container, such as details for device mappings.

+ #[serde(rename = "linuxParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub linux_parameters: Option, ///

The name of the CloudWatch Logs log stream associated with the container. The log group for AWS Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when it reaches the RUNNING status.

#[serde(rename = "logStreamName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -395,6 +398,10 @@ pub struct ContainerProperties { #[serde(rename = "jobRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub job_role_arn: Option, + ///

Linux-specific modifications that are applied to the container, such as details for device mappings.

+ #[serde(rename = "linuxParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub linux_parameters: Option, ///

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job.

If you are trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

#[serde(rename = "memory")] #[serde(skip_serializing_if = "Option::is_none")] @@ -435,7 +442,7 @@ pub struct ContainerProperties { ///

An object representing summary details of a container within a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContainerSummary { ///

The exit code to return upon completion.

#[serde(rename = "exitCode")] @@ -469,7 +476,7 @@ pub struct CreateComputeEnvironmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateComputeEnvironmentResponse { ///

The Amazon Resource Name (ARN) of the compute environment.

#[serde(rename = "computeEnvironmentArn")] @@ -499,7 +506,7 @@ pub struct CreateJobQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobQueueResponse { ///

The Amazon Resource Name (ARN) of the job queue.

#[serde(rename = "jobQueueArn")] @@ -517,7 +524,7 @@ pub struct DeleteComputeEnvironmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteComputeEnvironmentResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -528,7 +535,7 @@ pub struct DeleteJobQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteJobQueueResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -539,7 +546,7 @@ pub struct DeregisterJobDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterJobDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -559,7 +566,7 @@ pub struct DescribeComputeEnvironmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeComputeEnvironmentsResponse { ///

The list of compute environments.

#[serde(rename = "computeEnvironments")] @@ -596,7 +603,7 @@ pub struct DescribeJobDefinitionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobDefinitionsResponse { ///

The list of job definitions.

#[serde(rename = "jobDefinitions")] @@ -625,7 +632,7 @@ pub struct DescribeJobQueuesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobQueuesResponse { ///

The list of job queues.

#[serde(rename = "jobQueues")] @@ -645,7 +652,7 @@ pub struct DescribeJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobsResponse { ///

The list of jobs.

#[serde(rename = "jobs")] @@ -653,6 +660,22 @@ pub struct DescribeJobsResponse { pub jobs: Option>, } +///

An object representing a container instance host device.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Device { + ///

The path inside the container at which to expose the host device. By default the hostPath value is used.

+ #[serde(rename = "containerPath")] + #[serde(skip_serializing_if = "Option::is_none")] + pub container_path: Option, + ///

The path for the device on the host container instance.

+ #[serde(rename = "hostPath")] + pub host_path: String, + ///

The explicit permissions to provide to the container for the device. By default, the container has permissions for read, write, and mknod for the device.

+ #[serde(rename = "permissions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub permissions: Option>, +} + ///

Determine whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Host { @@ -664,7 +687,7 @@ pub struct Host { ///

An object representing an AWS Batch job definition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobDefinition { ///

An object with various properties specific to container-based jobs.

#[serde(rename = "containerProperties")] @@ -719,7 +742,7 @@ pub struct JobDependency { ///

An object representing an AWS Batch job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobDetail { ///

The array properties of the job, if it is an array job.

#[serde(rename = "arrayProperties")] @@ -772,7 +795,7 @@ pub struct JobDetail { ///

The Unix timestamp (in seconds and milliseconds) for when the job was started (when the job transitioned from the STARTING state to the RUNNING state).

#[serde(rename = "startedAt")] pub started_at: i64, - ///

The current status for the job.

If your jobs do not progress to STARTING, see Jobs Stuck in RUNNABLE Status in the troubleshooting section of the AWS Batch User Guide.

+ ///

The current status for the job.

If your jobs do not progress to STARTING, see Jobs Stuck in RUNNABLE Status in the troubleshooting section of the AWS Batch User Guide.

#[serde(rename = "status")] pub status: String, ///

A short, human-readable string to provide additional details about the current status of the job.

@@ -791,7 +814,7 @@ pub struct JobDetail { ///

An object representing the details of an AWS Batch job queue.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobQueueDetail { ///

The compute environments that are attached to the job queue and the order in which job placement is preferred. Compute environments are selected for job placement in ascending order.

#[serde(rename = "computeEnvironmentOrder")] @@ -820,7 +843,7 @@ pub struct JobQueueDetail { ///

An object representing summary details of a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobSummary { ///

The array properties of the job, if it is an array job.

#[serde(rename = "arrayProperties")] @@ -901,6 +924,15 @@ pub struct LaunchTemplateSpecification { pub version: Option, } +///

Linux-specific modifications that are applied to the container, such as details for device mappings.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct LinuxParameters { + ///

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

+ #[serde(rename = "devices")] + #[serde(skip_serializing_if = "Option::is_none")] + pub devices: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListJobsRequest { ///

The job ID for an array job. Specifying an array job ID with this parameter lists all child jobs from within the specified array.

@@ -930,7 +962,7 @@ pub struct ListJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResponse { ///

A list of job summaries that match the request.

#[serde(rename = "jobSummaryList")] @@ -960,7 +992,7 @@ pub struct MountPoint { ///

An object representing the elastic network interface for a multi-node parallel job node.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkInterface { ///

The attachment ID for the network interface.

#[serde(rename = "attachmentId")] @@ -978,7 +1010,7 @@ pub struct NetworkInterface { ///

An object representing the details of a multi-node parallel job node.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NodeDetails { ///

Specifies whether the current node is the main node for a multi-node parallel job.

#[serde(rename = "isMainNode")] @@ -1019,7 +1051,7 @@ pub struct NodeProperties { ///

An object representing the properties of a node that is associated with a multi-node parallel job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NodePropertiesSummary { ///

Specifies whether the current node is the main node for a multi-node parallel job.

#[serde(rename = "isMainNode")] @@ -1090,7 +1122,7 @@ pub struct RegisterJobDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterJobDefinitionResponse { ///

The Amazon Resource Name (ARN) of the job definition.

#[serde(rename = "jobDefinitionArn")] @@ -1165,7 +1197,7 @@ pub struct SubmitJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubmitJobResponse { ///

The unique identifier for the job.

#[serde(rename = "jobId")] @@ -1186,7 +1218,7 @@ pub struct TerminateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminateJobResponse {} ///

The ulimit settings to pass to the container.

@@ -1223,7 +1255,7 @@ pub struct UpdateComputeEnvironmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateComputeEnvironmentResponse { ///

The Amazon Resource Name (ARN) of the compute environment.

#[serde(rename = "computeEnvironmentArn")] @@ -1255,7 +1287,7 @@ pub struct UpdateJobQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateJobQueueResponse { ///

The Amazon Resource Name (ARN) of the job queue.

#[serde(rename = "jobQueueArn")] @@ -2001,10 +2033,7 @@ impl BatchClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> BatchClient { - BatchClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2018,10 +2047,14 @@ impl BatchClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - BatchClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> BatchClient { + BatchClient { client, region } } } diff --git a/rusoto/services/budgets/Cargo.toml b/rusoto/services/budgets/Cargo.toml index f6ce02ac0ef..47d1d74ea6b 100644 --- a/rusoto/services/budgets/Cargo.toml +++ b/rusoto/services/budgets/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_budgets" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/budgets/README.md b/rusoto/services/budgets/README.md index 9f100080b02..619e4bbead2 100644 --- a/rusoto/services/budgets/README.md +++ b/rusoto/services/budgets/README.md @@ -23,9 +23,16 @@ To use `rusoto_budgets` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_budgets = "0.40.0" +rusoto_budgets = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/budgets/src/custom/mod.rs b/rusoto/services/budgets/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/budgets/src/custom/mod.rs +++ b/rusoto/services/budgets/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/budgets/src/generated.rs b/rusoto/services/budgets/src/generated.rs index 574b9bb2375..73d44fe7b6f 100644 --- a/rusoto/services/budgets/src/generated.rs +++ b/rusoto/services/budgets/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -68,7 +67,7 @@ pub struct Budget { ///

A history of the state of a budget at the end of the budget's specified time period.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BudgetPerformanceHistory { #[serde(rename = "BudgetName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -95,7 +94,7 @@ pub struct BudgetPerformanceHistory { ///

The amount of cost or usage that you created the budget for, compared to your actual costs or usage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BudgetedAndActualAmounts { ///

Your actual costs or usage for a budget period.

#[serde(rename = "ActualAmount")] @@ -189,7 +188,7 @@ pub struct CreateBudgetRequest { ///

Response of CreateBudget

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBudgetResponse {} ///

Request of CreateNotification

@@ -211,7 +210,7 @@ pub struct CreateNotificationRequest { ///

Response of CreateNotification

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNotificationResponse {} ///

Request of CreateSubscriber

@@ -233,7 +232,7 @@ pub struct CreateSubscriberRequest { ///

Response of CreateSubscriber

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSubscriberResponse {} ///

Request of DeleteBudget

@@ -249,7 +248,7 @@ pub struct DeleteBudgetRequest { ///

Response of DeleteBudget

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBudgetResponse {} ///

Request of DeleteNotification

@@ -268,7 +267,7 @@ pub struct DeleteNotificationRequest { ///

Response of DeleteNotification

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteNotificationResponse {} ///

Request of DeleteSubscriber

@@ -290,7 +289,7 @@ pub struct DeleteSubscriberRequest { ///

Response of DeleteSubscriber

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSubscriberResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -312,7 +311,7 @@ pub struct DescribeBudgetPerformanceHistoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBudgetPerformanceHistoryResponse { ///

The history of how often the budget has gone into an ALARM state.

For DAILY budgets, the history saves the state of the budget for the last 60 days. For MONTHLY budgets, the history saves the state of the budget for the current month plus the last 12 months. For QUARTERLY budgets, the history saves the state of the budget for the last four quarters.

#[serde(rename = "BudgetPerformanceHistory")] @@ -336,7 +335,7 @@ pub struct DescribeBudgetRequest { ///

Response of DescribeBudget

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBudgetResponse { ///

The description of the budget.

#[serde(rename = "Budget")] @@ -362,7 +361,7 @@ pub struct DescribeBudgetsRequest { ///

Response of DescribeBudgets

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBudgetsResponse { ///

A list of budgets.

#[serde(rename = "Budgets")] @@ -395,7 +394,7 @@ pub struct DescribeNotificationsForBudgetRequest { ///

Response of GetNotificationsForBudget

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeNotificationsForBudgetResponse { ///

The pagination token in the service response that indicates the next set of results that you can retrieve.

#[serde(rename = "NextToken")] @@ -431,7 +430,7 @@ pub struct DescribeSubscribersForNotificationRequest { ///

Response of DescribeSubscribersForNotification

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSubscribersForNotificationResponse { ///

The pagination token in the service response that indicates the next set of results that you can retrieve.

#[serde(rename = "NextToken")] @@ -524,7 +523,7 @@ pub struct UpdateBudgetRequest { ///

Response of UpdateBudget

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBudgetResponse {} ///

Request of UpdateNotification

@@ -546,7 +545,7 @@ pub struct UpdateNotificationRequest { ///

Response of UpdateNotification

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNotificationResponse {} ///

Request of UpdateSubscriber

@@ -571,7 +570,7 @@ pub struct UpdateSubscriberRequest { ///

Response of UpdateSubscriber

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSubscriberResponse {} /// Errors returned by CreateBudget @@ -1435,10 +1434,7 @@ impl BudgetsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> BudgetsClient { - BudgetsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1452,10 +1448,14 @@ impl BudgetsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - BudgetsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> BudgetsClient { + BudgetsClient { client, region } } } diff --git a/rusoto/services/ce/Cargo.toml b/rusoto/services/ce/Cargo.toml index d7f1eb63c0d..2241fe0f2d5 100644 --- a/rusoto/services/ce/Cargo.toml +++ b/rusoto/services/ce/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ce" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ce/README.md b/rusoto/services/ce/README.md index 4e86e9e9d05..6db9d5f48d2 100644 --- a/rusoto/services/ce/README.md +++ b/rusoto/services/ce/README.md @@ -23,9 +23,16 @@ To use `rusoto_ce` in your application, add it as a dependency in your `Cargo.to ```toml [dependencies] -rusoto_ce = "0.40.0" +rusoto_ce = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ce/src/custom/mod.rs b/rusoto/services/ce/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ce/src/custom/mod.rs +++ b/rusoto/services/ce/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ce/src/generated.rs b/rusoto/services/ce/src/generated.rs index 22361816a5d..4c80f895eff 100644 --- a/rusoto/services/ce/src/generated.rs +++ b/rusoto/services/ce/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

The amount of instance usage that a reservation covered.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Coverage { ///

The amount of cost that the reservation covered.

#[serde(rename = "CoverageCost")] @@ -44,7 +43,7 @@ pub struct Coverage { ///

Reservation coverage for a specified period, in hours.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CoverageByTime { ///

The groups of instances that the reservation covered.

#[serde(rename = "Groups")] @@ -62,7 +61,7 @@ pub struct CoverageByTime { ///

How much it cost to run an instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CoverageCost { ///

How much an On-Demand instance cost.

#[serde(rename = "OnDemandCost")] @@ -72,7 +71,7 @@ pub struct CoverageCost { ///

How long a running instance either used a reservation or was On-Demand.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CoverageHours { ///

The percentage of instance hours that a reservation covered.

#[serde(rename = "CoverageHoursPercentage")] @@ -94,7 +93,7 @@ pub struct CoverageHours { ///

The amount of instance usage, in normalized units. Normalized units enable you to see your EC2 usage for multiple sizes of instances in a uniform way. For example, suppose you run an xlarge instance and a 2xlarge instance. If you run both instances for the same amount of time, the 2xlarge instance uses twice as much of your reservation as the xlarge instance, even though both instances show only one instance-hour. Using normalized units instead of instance-hours, the xlarge instance used 8 normalized units, and the 2xlarge instance used 16 normalized units.

For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CoverageNormalizedUnits { ///

The percentage of your used instance normalized units that a reservation covers.

#[serde(rename = "CoverageNormalizedUnitsPercentage")] @@ -114,6 +113,48 @@ pub struct CoverageNormalizedUnits { pub total_running_normalized_units: Option, } +///

Context about the current instance.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CurrentInstance { + ///

The currency code that Amazon Web Services used to calculate the costs for this instance.

+ #[serde(rename = "CurrencyCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub currency_code: Option, + ///

Current On Demand cost of operating this instance on a monthly basis.

+ #[serde(rename = "MonthlyCost")] + #[serde(skip_serializing_if = "Option::is_none")] + pub monthly_cost: Option, + ///

Number of hours during the lookback period billed at On Demand rates.

+ #[serde(rename = "OnDemandHoursInLookbackPeriod")] + #[serde(skip_serializing_if = "Option::is_none")] + pub on_demand_hours_in_lookback_period: Option, + ///

Number of hours during the lookback period covered by reservations.

+ #[serde(rename = "ReservationCoveredHoursInLookbackPeriod")] + #[serde(skip_serializing_if = "Option::is_none")] + pub reservation_covered_hours_in_lookback_period: Option, + ///

Details about the resource and utilization.

+ #[serde(rename = "ResourceDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_details: Option, + ///

Resource ID of the current instance.

+ #[serde(rename = "ResourceId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_id: Option, + ///

Utilization information of the current instance during the lookback period.

+ #[serde(rename = "ResourceUtilization")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_utilization: Option, + ///

Cost allocation resource tags applied to the instance.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The total number of hours the instance ran during the lookback period.

+ #[serde(rename = "TotalRunningHoursInLookbackPeriod")] + #[serde(skip_serializing_if = "Option::is_none")] + pub total_running_hours_in_lookback_period: Option, +} + ///

The time period that you want the usage and costs for.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DateInterval { @@ -140,7 +181,7 @@ pub struct DimensionValues { ///

The metadata of a specific type that you can use to filter and group your results. You can use GetDimensionValues to find specific values.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DimensionValuesWithAttributes { ///

The attribute that applies to a specific Dimension.

#[serde(rename = "Attributes")] @@ -154,7 +195,7 @@ pub struct DimensionValuesWithAttributes { ///

Details about the Amazon EC2 instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EC2InstanceDetails { ///

The Availability Zone of the recommended reservation.

#[serde(rename = "AvailabilityZone")] @@ -190,6 +231,66 @@ pub struct EC2InstanceDetails { pub tenancy: Option, } +///

Details on the Amazon EC2 Resource.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EC2ResourceDetails { + ///

Hourly public On Demand rate for the instance type.

+ #[serde(rename = "HourlyOnDemandRate")] + #[serde(skip_serializing_if = "Option::is_none")] + pub hourly_on_demand_rate: Option, + ///

The type of Amazon Web Services instance.

+ #[serde(rename = "InstanceType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub instance_type: Option, + ///

Memory capacity of Amazon Web Services instance.

+ #[serde(rename = "Memory")] + #[serde(skip_serializing_if = "Option::is_none")] + pub memory: Option, + ///

Network performance capacity of the Amazon Web Services instance.

+ #[serde(rename = "NetworkPerformance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub network_performance: Option, + ///

The platform of the Amazon Web Services instance. The platform is the specific combination of operating system, license model, and software on an instance.

+ #[serde(rename = "Platform")] + #[serde(skip_serializing_if = "Option::is_none")] + pub platform: Option, + ///

The Amazon Web Services Region of the instance.

+ #[serde(rename = "Region")] + #[serde(skip_serializing_if = "Option::is_none")] + pub region: Option, + ///

The SKU of the product.

+ #[serde(rename = "Sku")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sku: Option, + ///

The disk storage of the Amazon Web Services instance (Not EBS storage).

+ #[serde(rename = "Storage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub storage: Option, + ///

Number of VCPU cores in the Amazon Web Services instance type.

+ #[serde(rename = "Vcpu")] + #[serde(skip_serializing_if = "Option::is_none")] + pub vcpu: Option, +} + +///

Utilization metrics of the instance.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EC2ResourceUtilization { + ///

Maximum observed or expected CPU utilization of the instance.

+ #[serde(rename = "MaxCpuUtilizationPercentage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_cpu_utilization_percentage: Option, + ///

Maximum observed or expected memory utilization of the instance.

+ #[serde(rename = "MaxMemoryUtilizationPercentage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_memory_utilization_percentage: Option, + ///

Maximum observed or expected storage utilization of the instance (does not measure EBS storage).

+ #[serde(rename = "MaxStorageUtilizationPercentage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_storage_utilization_percentage: Option, +} + ///

The Amazon EC2 hardware specifications that you want AWS to provide recommendations for.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EC2Specification { @@ -201,7 +302,7 @@ pub struct EC2Specification { ///

Details about the Amazon ES instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ESInstanceDetails { ///

Whether the recommendation is for a current-generation instance.

#[serde(rename = "CurrentGeneration")] @@ -227,7 +328,7 @@ pub struct ESInstanceDetails { ///

Details about the Amazon ElastiCache instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ElastiCacheInstanceDetails { ///

Whether the recommendation is for a current generation instance.

#[serde(rename = "CurrentGeneration")] @@ -255,7 +356,7 @@ pub struct ElastiCacheInstanceDetails { pub size_flex_eligible: Option, } -///

Use Expression to filter by cost or by usage. There are two patterns:

  • Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCETYPE==m4.xlarge OR INSTANCETYPE==c4.large. The Expression for that looks like this:

    { "Dimensions": { "Key": "INSTANCETYPE", "Values": [ "m4.xlarge", “c4.large” ] } }

    The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with methods or set methods in multiple lines.

  • Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCETYPE == m4.large OR INSTANCETYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGETYPE != DataTransfer). The Expression for that looks like this:

    { "And": [ {"Or": [ {"Dimensions": { "Key": "INSTANCETYPE", "Values": [ "m4.x.large", "c4.large" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": { "Key": "USAGETYPE", "Values": ["DataTransfer"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.

    { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", "Values": [ "DataTransfer" ] } }

+///

Use Expression to filter by cost or by usage. There are two patterns:

  • Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. The Expression for that looks like this:

    { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } }

    The list of dimension values is OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.

  • Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). The Expression for that looks like this:

    { "And": [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": { "Key": "USAGETYPE", "Values": ["DataTransfer"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.

    { "And": [ ... ], "DimensionValues": { "Dimension": "USAGETYPE", "Values": [ "DataTransfer" ] } }

For the GetRightsizingRecommendation action, a combination of OR and NOT is not supported. OR is not supported between different dimensions, or between dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.
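For reference, a hedged sketch of how the first JSON pattern above maps onto the generated Rust types that follow; it assumes the optional key/values fields on DimensionValues:

```rust
use rusoto_ce::{DimensionValues, Expression};

/// Builds the simple-dimension filter from the example above:
/// REGION == us-east-1 OR REGION == us-west-1.
fn region_filter() -> Expression {
    Expression {
        dimensions: Some(DimensionValues {
            key: Some("REGION".to_string()),
            values: Some(vec!["us-east-1".to_string(), "us-west-1".to_string()]),
            ..Default::default()
        }),
        // Exactly one operator per Expression; And/Or/Not/Tags stay unset here.
        ..Default::default()
    }
}
```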

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Expression { ///

Return results that match both Dimension objects.

@@ -282,7 +383,7 @@ pub struct Expression { ///

The forecast created for your query.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ForecastResult { ///

The mean value of the forecast.

#[serde(rename = "MeanValue")] @@ -330,7 +431,7 @@ pub struct GetCostAndUsageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCostAndUsageResponse { ///

The groups that are specified by the Filter or GroupBy parameters in the request.

#[serde(rename = "GroupDefinitions")] @@ -355,7 +456,7 @@ pub struct GetCostForecastRequest { ///

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.

The GetCostForecast operation supports only DAILY and MONTHLY granularities.

#[serde(rename = "Granularity")] pub granularity: String, - ///

Which metric Cost Explorer uses to create your forecast. For more information about blended and unblended rates, see Why does the "blended" annotation appear on some line items in my bill?.

Valid values for a GetCostForecast call are the following:

  • AmortizedCost

  • BlendedCost

  • NetAmortizedCost

  • NetUnblendedCost

  • UnblendedCost

+ ///

Which metric Cost Explorer uses to create your forecast. For more information about blended and unblended rates, see Why does the "blended" annotation appear on some line items in my bill?.

Valid values for a GetCostForecast call are the following:

  • AMORTIZED_COST

  • BLENDED_COST

  • NET_AMORTIZED_COST

  • NET_UNBLENDED_COST

  • UNBLENDED_COST

#[serde(rename = "Metric")] pub metric: String, ///

Cost Explorer always returns the mean forecast as a single point. You can request a prediction interval around the mean by specifying a confidence level. The higher the confidence level, the more confident Cost Explorer is about the actual value falling in the prediction interval. Higher confidence levels result in wider prediction intervals.

@@ -368,7 +469,7 @@ pub struct GetCostForecastRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCostForecastResponse { ///

The forecasts for your query, in order. For DAILY forecasts, this is a list of days. For MONTHLY forecasts, this is a list of months.

#[serde(rename = "ForecastResultsByTime")] @@ -403,7 +504,7 @@ pub struct GetDimensionValuesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDimensionValuesResponse { ///

The filters that you used to filter your request. Some dimensions are available only for a specific context.

If you set the context to COST_AND_USAGE, you can use the following dimensions for searching:

  • AZ - The Availability Zone. An example is us-east-1a.

  • DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.

  • INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.

  • LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, such as Amazon Web Services.

  • LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the AWS ID of the member account.

  • OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.

  • OPERATION - The action performed. Examples include RunInstance and CreateBucket.

  • PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.

  • PURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand Instances and Standard Reserved Instances.

  • SERVICE - The AWS service such as Amazon DynamoDB.

  • USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs.

  • USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute.

  • RECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.

If you set the context to RESERVATIONS, you can use the following dimensions for searching:

  • AZ - The Availability Zone. An example is us-east-1a.

  • CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.

  • DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ.

  • INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.

  • LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the AWS ID of the member account.

  • PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.

  • REGION - The AWS Region.

  • SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.

  • TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).

  • TENANCY - The tenancy of a resource. Examples are shared or dedicated.

#[serde(rename = "DimensionValues")] @@ -449,7 +550,7 @@ pub struct GetReservationCoverageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetReservationCoverageResponse { ///

The amount of time that your reservations covered.

#[serde(rename = "CoveragesByTime")] @@ -504,7 +605,7 @@ pub struct GetReservationPurchaseRecommendationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetReservationPurchaseRecommendationResponse { ///

Information about this specific recommendation call, such as the time stamp for when Cost Explorer generated this recommendation.

#[serde(rename = "Metadata")] @@ -544,7 +645,7 @@ pub struct GetReservationUtilizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetReservationUtilizationResponse { ///

The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.

#[serde(rename = "NextPageToken")] @@ -559,6 +660,45 @@ pub struct GetReservationUtilizationResponse { pub utilizations_by_time: Vec, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetRightsizingRecommendationRequest { + #[serde(rename = "Filter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + ///

The pagination token that indicates the next set of results that you want to retrieve.

+ #[serde(rename = "NextPageToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_page_token: Option, + ///

The number of recommendations that you want returned in a single response object.

+ #[serde(rename = "PageSize")] + #[serde(skip_serializing_if = "Option::is_none")] + pub page_size: Option, + ///

The specific service that you want recommendations for.

+ #[serde(rename = "Service")] + pub service: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetRightsizingRecommendationResponse { + ///

Information regarding this specific recommendation set.

+ #[serde(rename = "Metadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + ///

The token to retrieve the next set of results.

+ #[serde(rename = "NextPageToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_page_token: Option, + ///

Recommendations to rightsize resources.

+ #[serde(rename = "RightsizingRecommendations")] + #[serde(skip_serializing_if = "Option::is_none")] + pub rightsizing_recommendations: Option>, + ///

Summary of this recommendation set.

+ #[serde(rename = "Summary")] + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTagsRequest { ///

The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.

@@ -579,7 +719,7 @@ pub struct GetTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTagsResponse { ///

The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.

#[serde(rename = "NextPageToken")] @@ -596,9 +736,43 @@ pub struct GetTagsResponse { pub total_size: i64, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetUsageForecastRequest { + ///

The filters that you want to use to filter your forecast. Cost Explorer API supports all of the Cost Explorer filters.

+ #[serde(rename = "Filter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + ///

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.

The GetUsageForecast operation supports only DAILY and MONTHLY granularities.

+ #[serde(rename = "Granularity")] + pub granularity: String, + ///

Which metric Cost Explorer uses to create your forecast.

Valid values for a GetUsageForecast call are the following:

  • USAGE_QUANTITY

  • NORMALIZED_USAGE_AMOUNT

+ #[serde(rename = "Metric")] + pub metric: String, + ///

Cost Explorer always returns the mean forecast as a single point. You can request a prediction interval around the mean by specifying a confidence level. The higher the confidence level, the more confident Cost Explorer is about the actual value falling in the prediction interval. Higher confidence levels result in wider prediction intervals.

+ #[serde(rename = "PredictionIntervalLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub prediction_interval_level: Option, + ///

The start and end dates of the period that you want to retrieve the usage forecast for. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01, then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01.

+ #[serde(rename = "TimePeriod")] + pub time_period: DateInterval, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetUsageForecastResponse { + ///

The forecasts for your query, in order. For DAILY forecasts, this is a list of days. For MONTHLY forecasts, this is a list of months.

+ #[serde(rename = "ForecastResultsByTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub forecast_results_by_time: Option>, + ///

How much you're forecasted to use over the forecast period.

+ #[serde(rename = "Total")] + #[serde(skip_serializing_if = "Option::is_none")] + pub total: Option, +} + ///

One level of grouped data in the results.
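The hunk just below is one instance of a change applied throughout this diff: the Serialize derive on output structs, previously compiled only for tests, is now also enabled by the new serialize_structs crate feature advertised in the README updates. A minimal sketch of what that allows, assuming rusoto_ce is built with features = ["serialize_structs"] and serde_json is a dependency:

```rust
use rusoto_ce::Group;

fn main() {
    // With serialize_structs enabled, output structs such as Group derive
    // Serialize in regular builds, not just under #[cfg(test)].
    let group = Group::default();
    println!("{}", serde_json::to_string(&group).unwrap());
}
```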

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Group { ///

The keys that are included in this group.

#[serde(rename = "Keys")] @@ -625,7 +799,7 @@ pub struct GroupDefinition { ///

Details about the instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceDetails { ///

The Amazon EC2 instances that AWS recommends that you purchase.

#[serde(rename = "EC2InstanceDetails")] @@ -651,7 +825,7 @@ pub struct InstanceDetails { ///

The aggregated value for a metric.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricValue { ///

The actual number that represents the metric.

#[serde(rename = "Amount")] @@ -663,9 +837,19 @@ pub struct MetricValue { pub unit: Option, } +///

Details on the modification recommendation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ModifyRecommendationDetail { + ///

A list of the target instances that Amazon Web Services recommends.

+ #[serde(rename = "TargetInstances")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_instances: Option>, +} + ///

Details about the Amazon RDS instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RDSInstanceDetails { ///

Whether the recommendation is for a current-generation instance.

#[serde(rename = "CurrentGeneration")] @@ -707,7 +891,7 @@ pub struct RDSInstanceDetails { ///

Details about the Amazon Redshift instances that AWS recommends that you purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RedshiftInstanceDetails { ///

Whether the recommendation is for a current-generation instance.

#[serde(rename = "CurrentGeneration")] @@ -733,7 +917,7 @@ pub struct RedshiftInstanceDetails { ///

The aggregated numbers for your reservation usage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationAggregates { ///

The monthly cost of your reservation, amortized over the reservation period.

#[serde(rename = "AmortizedRecurringFee")] @@ -795,7 +979,7 @@ pub struct ReservationAggregates { ///

A group of reservations that share a set of attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationCoverageGroup { ///

The attributes for this group of reservations.

#[serde(rename = "Attributes")] @@ -809,7 +993,7 @@ pub struct ReservationCoverageGroup { ///

A specific reservation that AWS recommends for purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationPurchaseRecommendation { ///

The account scope that AWS recommends that you purchase this instance for. For example, you can purchase this reservation for an entire organization in AWS Organizations.

#[serde(rename = "AccountScope")] @@ -843,7 +1027,7 @@ pub struct ReservationPurchaseRecommendation { ///

Details about your recommended reservation purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationPurchaseRecommendationDetail { ///

The account that this RI recommendation is for.

#[serde(rename = "AccountId")] @@ -925,7 +1109,7 @@ pub struct ReservationPurchaseRecommendationDetail { ///

Information about this specific recommendation, such as the time stamp for when AWS made a specific recommendation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationPurchaseRecommendationMetadata { ///

The time stamp for when AWS made this recommendation.

#[serde(rename = "GenerationTimestamp")] @@ -939,7 +1123,7 @@ pub struct ReservationPurchaseRecommendationMetadata { ///

A summary about this recommendation, such as the currency code, the amount that AWS estimates that you could save, and the total amount of reservation to purchase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationPurchaseRecommendationSummary { ///

The currency code used for this recommendation.

#[serde(rename = "CurrencyCode")] @@ -957,7 +1141,7 @@ pub struct ReservationPurchaseRecommendationSummary { ///

A group of reservations that share a set of attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationUtilizationGroup { ///

The attributes for this group of reservations.

#[serde(rename = "Attributes")] @@ -977,9 +1161,29 @@ pub struct ReservationUtilizationGroup { pub value: Option, } +///

Details on the resource.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ResourceDetails { + ///

Details on the Amazon EC2 resource.

+ #[serde(rename = "EC2ResourceDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ec2_resource_details: Option, +} + +///

Resource utilization of the current resource.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ResourceUtilization { + ///

Utilization of the current Amazon EC2 instance.

+ #[serde(rename = "EC2ResourceUtilization")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ec2_resource_utilization: Option, +} + ///

The result that is associated with a time period.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultByTime { ///

Whether the result is estimated.

#[serde(rename = "Estimated")] @@ -999,6 +1203,72 @@ pub struct ResultByTime { pub total: Option<::std::collections::HashMap>, } +///

Recommendations to rightsize resources.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RightsizingRecommendation { + ///

The account that this recommendation is for.

+ #[serde(rename = "AccountId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub account_id: Option, + ///

Context regarding the current instance.

+ #[serde(rename = "CurrentInstance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub current_instance: Option, + ///

Details for modification recommendations.

+ #[serde(rename = "ModifyRecommendationDetail")] + #[serde(skip_serializing_if = "Option::is_none")] + pub modify_recommendation_detail: Option, + ///

Recommendation to either terminate or modify the resource.

+ #[serde(rename = "RightsizingType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub rightsizing_type: Option, + ///

Details for termination recommendations.

+ #[serde(rename = "TerminateRecommendationDetail")] + #[serde(skip_serializing_if = "Option::is_none")] + pub terminate_recommendation_detail: Option, +} + +///

Metadata for this recommendation set.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RightsizingRecommendationMetadata { + ///

The time stamp for when Amazon Web Services made this recommendation.

+ #[serde(rename = "GenerationTimestamp")] + #[serde(skip_serializing_if = "Option::is_none")] + pub generation_timestamp: Option, + ///

How many days of previous usage Amazon Web Services considers when making this recommendation.

+ #[serde(rename = "LookbackPeriodInDays")] + #[serde(skip_serializing_if = "Option::is_none")] + pub lookback_period_in_days: Option, + ///

The ID for this specific recommendation.

+ #[serde(rename = "RecommendationId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommendation_id: Option, +} + +///

A summary of rightsizing recommendations.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RightsizingRecommendationSummary { + ///

Estimated total savings resulting from modifications, on a monthly basis.

+ #[serde(rename = "EstimatedTotalMonthlySavingsAmount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub estimated_total_monthly_savings_amount: Option, + ///

The currency code that Amazon Web Services used to calculate the savings.

+ #[serde(rename = "SavingsCurrencyCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub savings_currency_code: Option, + ///

The savings percentage based on the recommended modifications, relative to the total On-Demand costs associated with these instances.

+ #[serde(rename = "SavingsPercentage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub savings_percentage: Option, + ///

Total number of instance recommendations.

+ #[serde(rename = "TotalRecommendationCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub total_recommendation_count: Option, +} + ///

Hardware specifications for the service that you want recommendations for.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ServiceSpecification { @@ -1009,7 +1279,7 @@ pub struct ServiceSpecification { } ///

The values that are available for a tag.

-#[derive(Default, Debug, Clone, PartialEq, Serialize)] +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TagValues { ///

The key for the tag.

#[serde(rename = "Key")] @@ -1021,9 +1291,53 @@ pub struct TagValues { pub values: Option>, } +///

Details on the recommended instance.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TargetInstance { + ///

The currency code that Amazon Web Services used to calculate the costs for this instance.

+ #[serde(rename = "CurrencyCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub currency_code: Option, + ///

Indicates whether this recommendation is the default Amazon Web Services recommendation.

+ #[serde(rename = "DefaultTargetInstance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub default_target_instance: Option, + ///

Expected cost to operate this instance type on a monthly basis.

+ #[serde(rename = "EstimatedMonthlyCost")] + #[serde(skip_serializing_if = "Option::is_none")] + pub estimated_monthly_cost: Option, + ///

Estimated savings resulting from modification, on a monthly basis.

+ #[serde(rename = "EstimatedMonthlySavings")] + #[serde(skip_serializing_if = "Option::is_none")] + pub estimated_monthly_savings: Option, + ///

Expected utilization metrics for the target instance type.

+ #[serde(rename = "ExpectedResourceUtilization")] + #[serde(skip_serializing_if = "Option::is_none")] + pub expected_resource_utilization: Option, + ///

Details on the target instance type.

+ #[serde(rename = "ResourceDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_details: Option, +} + +///

Details on the termination recommendation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TerminateRecommendationDetail { + ///

The currency code that Amazon Web Services used to calculate the costs for this instance.

+ #[serde(rename = "CurrencyCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub currency_code: Option, + ///

Estimated savings resulting from modification, on a monthly basis.

+ #[serde(rename = "EstimatedMonthlySavings")] + #[serde(skip_serializing_if = "Option::is_none")] + pub estimated_monthly_savings: Option, +} + ///

The amount of utilization, in hours.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UtilizationByTime { ///

The groups that this utilization result uses.

#[serde(rename = "Groups")] @@ -1347,6 +1661,51 @@ impl Error for GetReservationUtilizationError { } } } +/// Errors returned by GetRightsizingRecommendation +#[derive(Debug, PartialEq)] +pub enum GetRightsizingRecommendationError { + ///

The pagination token is invalid. Try again without a pagination token.

+ InvalidNextToken(String), + ///

You made too many calls in a short period of time. Try again later.

+ LimitExceeded(String), +} + +impl GetRightsizingRecommendationError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidNextTokenException" => { + return RusotoError::Service( + GetRightsizingRecommendationError::InvalidNextToken(err.msg), + ) + } + "LimitExceededException" => { + return RusotoError::Service(GetRightsizingRecommendationError::LimitExceeded( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetRightsizingRecommendationError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetRightsizingRecommendationError { + fn description(&self) -> &str { + match *self { + GetRightsizingRecommendationError::InvalidNextToken(ref cause) => cause, + GetRightsizingRecommendationError::LimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by GetTags #[derive(Debug, PartialEq)] pub enum GetTagsError { @@ -1404,6 +1763,53 @@ impl Error for GetTagsError { } } } +/// Errors returned by GetUsageForecast +#[derive(Debug, PartialEq)] +pub enum GetUsageForecastError { + ///

The requested data is unavailable.

+ DataUnavailable(String), + ///

You made too many calls in a short period of time. Try again later.

+ LimitExceeded(String), + ///

Cost Explorer was unable to identify the usage unit. Provide UsageType/UsageTypeGroup filter selections that contain matching units, for example: hours.
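A hedged sketch of dispatching on these variants after from_response has classified a failure; the describe helper is hypothetical:

```rust
use rusoto_ce::GetUsageForecastError;
use rusoto_core::RusotoError;

// Hypothetical helper: turn a failed GetUsageForecast call into a message.
fn describe(err: RusotoError<GetUsageForecastError>) -> String {
    match err {
        RusotoError::Service(GetUsageForecastError::DataUnavailable(msg)) => {
            format!("data unavailable: {}", msg)
        }
        RusotoError::Service(GetUsageForecastError::LimitExceeded(msg)) => {
            format!("throttled, retry later: {}", msg)
        }
        RusotoError::Service(GetUsageForecastError::UnresolvableUsageUnit(msg)) => {
            format!("supply UsageType filters with matching units: {}", msg)
        }
        RusotoError::Validation(msg) => format!("validation error: {}", msg),
        other => format!("transport or unknown error: {:?}", other),
    }
}
```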

+ UnresolvableUsageUnit(String), +} + +impl GetUsageForecastError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "DataUnavailableException" => { + return RusotoError::Service(GetUsageForecastError::DataUnavailable(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(GetUsageForecastError::LimitExceeded(err.msg)) + } + "UnresolvableUsageUnitException" => { + return RusotoError::Service(GetUsageForecastError::UnresolvableUsageUnit( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetUsageForecastError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetUsageForecastError { + fn description(&self) -> &str { + match *self { + GetUsageForecastError::DataUnavailable(ref cause) => cause, + GetUsageForecastError::LimitExceeded(ref cause) => cause, + GetUsageForecastError::UnresolvableUsageUnit(ref cause) => cause, + } + } +} /// Trait representing the capabilities of the AWS Cost Explorer API. AWS Cost Explorer clients implement this trait. pub trait CostExplorer { ///

Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Master accounts in an organization in AWS Organizations have access to all member accounts.
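A minimal sketch of that call; the metric name, dates, and the blocking `.sync()` adapter are illustrative, not part of this diff:

```rust
use rusoto_ce::{CostExplorer, CostExplorerClient, DateInterval, GetCostAndUsageRequest};
use rusoto_core::Region;

fn main() {
    let client = CostExplorerClient::new(Region::UsEast1);
    let request = GetCostAndUsageRequest {
        granularity: Some("MONTHLY".to_owned()),
        metrics: Some(vec!["BlendedCost".to_owned()]),
        time_period: DateInterval {
            start: "2019-06-01".to_owned(), // placeholder dates
            end: "2019-09-01".to_owned(),
        },
        ..Default::default()
    };
    match client.get_cost_and_usage(request).sync() {
        Ok(output) => {
            for result in output.results_by_time.unwrap_or_default() {
                println!("{:?}: {:?}", result.time_period, result.total);
            }
        }
        Err(error) => eprintln!("GetCostAndUsage failed: {}", error),
    }
}
```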

@@ -1445,8 +1851,20 @@ pub trait CostExplorer { input: GetReservationUtilizationRequest, ) -> RusotoFuture; + ///

Creates recommendations that help you save costs by identifying idle and underutilized Amazon EC2 instances.

Recommendations are generated to either downsize or terminate instances, along with savings detail and metrics. For details on calculation and function, see Optimizing Your Cost with Rightsizing Recommendations.
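A minimal sketch of requesting EC2 rightsizing recommendations; the page size is an arbitrary placeholder:

```rust
use rusoto_ce::{CostExplorer, CostExplorerClient, GetRightsizingRecommendationRequest};
use rusoto_core::Region;

fn main() {
    let client = CostExplorerClient::new(Region::UsEast1);
    let request = GetRightsizingRecommendationRequest {
        service: "AmazonEC2".to_owned(),
        page_size: Some(20), // placeholder page size
        ..Default::default()
    };
    match client.get_rightsizing_recommendation(request).sync() {
        Ok(output) => {
            for rec in output.rightsizing_recommendations.unwrap_or_default() {
                println!("{:?} for account {:?}", rec.rightsizing_type, rec.account_id);
            }
            println!("summary: {:?}", output.summary);
        }
        Err(error) => eprintln!("GetRightsizingRecommendation failed: {}", error),
    }
}
```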

+ fn get_rightsizing_recommendation( + &self, + input: GetRightsizingRecommendationRequest, + ) -> RusotoFuture; + ///

Queries for available tag keys and tag values for a specified period. You can search the tag values for an arbitrary string.
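A minimal sketch of searching tag values; the search string and dates are placeholders:

```rust
use rusoto_ce::{CostExplorer, CostExplorerClient, DateInterval, GetTagsRequest};
use rusoto_core::Region;

fn main() {
    let client = CostExplorerClient::new(Region::UsEast1);
    let request = GetTagsRequest {
        search_string: Some("team".to_owned()), // placeholder search string
        time_period: DateInterval {
            start: "2019-07-01".to_owned(),
            end: "2019-08-01".to_owned(),
        },
        ..Default::default()
    };
    match client.get_tags(request).sync() {
        Ok(output) => println!("{} matching tags: {:?}", output.total_size, output.tags),
        Err(error) => eprintln!("GetTags failed: {}", error),
    }
}
```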

fn get_tags(&self, input: GetTagsRequest) -> RusotoFuture; + + ///

Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage.
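A minimal sketch of the new operation, tying together the Granularity, Metric, PredictionIntervalLevel, and TimePeriod members described earlier; all values are placeholders:

```rust
use rusoto_ce::{CostExplorer, CostExplorerClient, DateInterval, GetUsageForecastRequest};
use rusoto_core::Region;

fn main() {
    let client = CostExplorerClient::new(Region::UsEast1);
    let request = GetUsageForecastRequest {
        granularity: "MONTHLY".to_owned(),
        metric: "USAGE_QUANTITY".to_owned(),
        // Request an 80% prediction interval around the mean forecast.
        prediction_interval_level: Some(80),
        time_period: DateInterval {
            start: "2019-09-01".to_owned(), // inclusive
            end: "2019-12-01".to_owned(),   // exclusive
        },
        ..Default::default()
    };
    match client.get_usage_forecast(request).sync() {
        Ok(output) => println!("forecast total: {:?}", output.total),
        Err(error) => eprintln!("GetUsageForecast failed: {}", error),
    }
}
```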

+ fn get_usage_forecast( + &self, + input: GetUsageForecastRequest, + ) -> RusotoFuture; } /// A client for the AWS Cost Explorer API. #[derive(Clone)] @@ -1460,10 +1878,7 @@ impl CostExplorerClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CostExplorerClient { - CostExplorerClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1477,10 +1892,14 @@ impl CostExplorerClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CostExplorerClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CostExplorerClient { + CostExplorerClient { client, region } } } @@ -1666,6 +2085,35 @@ impl CostExplorer for CostExplorerClient { }) } + ///

Creates recommendations that help you save costs by identifying idle and underutilized Amazon EC2 instances.

Recommendations are generated to either downsize or terminate instances, along with savings detail and metrics. For details on calculation and function, see Optimizing Your Cost with Rightsizing Recommendations.

+ fn get_rightsizing_recommendation( + &self, + input: GetRightsizingRecommendationRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "ce", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWSInsightsIndexService.GetRightsizingRecommendation", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(GetRightsizingRecommendationError::from_response(response)) + })) + } + }) + } + ///

Queries for available tag keys and tag values for a specified period. You can search the tag values for an arbitrary string.

fn get_tags(&self, input: GetTagsRequest) -> RusotoFuture { let mut request = SignedRequest::new("POST", "ce", &self.region, "/"); @@ -1690,4 +2138,33 @@ impl CostExplorer for CostExplorerClient { } }) } + + ///

Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage.

+ fn get_usage_forecast( + &self, + input: GetUsageForecastRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "ce", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSInsightsIndexService.GetUsageForecast"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetUsageForecastError::from_response(response))), + ) + } + }) + } } diff --git a/rusoto/services/chime/Cargo.toml b/rusoto/services/chime/Cargo.toml index be04478e411..e38a3a33958 100644 --- a/rusoto/services/chime/Cargo.toml +++ b/rusoto/services/chime/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_chime" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/chime/README.md b/rusoto/services/chime/README.md index ce911ef0ad3..13a50503ce5 100644 --- a/rusoto/services/chime/README.md +++ b/rusoto/services/chime/README.md @@ -23,9 +23,16 @@ To use `rusoto_chime` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_chime = "0.40.0" +rusoto_chime = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/chime/src/custom/mod.rs b/rusoto/services/chime/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/chime/src/custom/mod.rs +++ b/rusoto/services/chime/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/chime/src/generated.rs b/rusoto/services/chime/src/generated.rs index ee527b93525..70a62b0fca0 100644 --- a/rusoto/services/chime/src/generated.rs +++ b/rusoto/services/chime/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest; use serde_json; ///

The Amazon Chime account details. An AWS account can have multiple Amazon Chime accounts.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Account { ///

The Amazon Chime account ID.

#[serde(rename = "AccountId")] @@ -83,7 +82,7 @@ pub struct AssociatePhoneNumberWithUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociatePhoneNumberWithUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -98,7 +97,7 @@ pub struct AssociatePhoneNumbersWithVoiceConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociatePhoneNumbersWithVoiceConnectorResponse { ///

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

#[serde(rename = "PhoneNumberErrors")] @@ -114,7 +113,7 @@ pub struct BatchDeletePhoneNumberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeletePhoneNumberResponse { ///

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

#[serde(rename = "PhoneNumberErrors")] @@ -133,7 +132,7 @@ pub struct BatchSuspendUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchSuspendUserResponse { ///

If the BatchSuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

#[serde(rename = "UserErrors")] @@ -152,7 +151,7 @@ pub struct BatchUnsuspendUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUnsuspendUserResponse { ///

If the BatchUnsuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

#[serde(rename = "UserErrors")] @@ -168,7 +167,7 @@ pub struct BatchUpdatePhoneNumberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUpdatePhoneNumberResponse { ///

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

#[serde(rename = "PhoneNumberErrors")] @@ -187,7 +186,7 @@ pub struct BatchUpdateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUpdateUserResponse { ///

If the BatchUpdateUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

#[serde(rename = "UserErrors")] @@ -197,7 +196,7 @@ pub struct BatchUpdateUserResponse { ///

A resource that allows Enterprise account administrators to configure an interface to receive events from Amazon Chime.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Bot { ///

The bot email address.

#[serde(rename = "BotEmail")] @@ -254,7 +253,7 @@ pub struct CreateAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAccountResponse { ///

The Amazon Chime account details.

#[serde(rename = "Account")] @@ -277,7 +276,7 @@ pub struct CreateBotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBotResponse { ///

The bot details.

#[serde(rename = "Bot")] @@ -296,7 +295,7 @@ pub struct CreatePhoneNumberOrderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePhoneNumberOrderResponse { ///

The phone number order details.

#[serde(rename = "PhoneNumberOrder")] @@ -315,7 +314,7 @@ pub struct CreateVoiceConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateVoiceConnectorResponse { ///

The Amazon Chime Voice Connector details.

#[serde(rename = "VoiceConnector")] @@ -344,7 +343,7 @@ pub struct DeleteAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAccountResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -407,7 +406,7 @@ pub struct DisassociatePhoneNumberFromUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociatePhoneNumberFromUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -422,7 +421,7 @@ pub struct DisassociatePhoneNumbersFromVoiceConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociatePhoneNumbersFromVoiceConnectorResponse { ///

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

#[serde(rename = "PhoneNumberErrors")] @@ -432,7 +431,7 @@ pub struct DisassociatePhoneNumbersFromVoiceConnectorResponse { ///

The configuration that allows a bot to receive outgoing events. Can be either an HTTPS endpoint or a Lambda function ARN.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventsConfiguration { ///

The bot ID.

#[serde(rename = "BotId")] @@ -456,7 +455,7 @@ pub struct GetAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAccountResponse { ///

The Amazon Chime account details.

#[serde(rename = "Account")] @@ -472,7 +471,7 @@ pub struct GetAccountSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAccountSettingsResponse { ///

The Amazon Chime account settings.

#[serde(rename = "AccountSettings")] @@ -491,7 +490,7 @@ pub struct GetBotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotResponse { ///

The chat bot details.

#[serde(rename = "Bot")] @@ -510,7 +509,7 @@ pub struct GetEventsConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEventsConfigurationResponse { ///

The events configuration details.

#[serde(rename = "EventsConfiguration")] @@ -519,7 +518,7 @@ pub struct GetEventsConfigurationResponse { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGlobalSettingsResponse { ///

The Amazon Chime Business Calling settings.

#[serde(rename = "BusinessCalling")] @@ -539,7 +538,7 @@ pub struct GetPhoneNumberOrderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPhoneNumberOrderResponse { ///

The phone number order details.

#[serde(rename = "PhoneNumberOrder")] @@ -555,7 +554,7 @@ pub struct GetPhoneNumberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPhoneNumberResponse { ///

The phone number details.

#[serde(rename = "PhoneNumber")] @@ -574,7 +573,7 @@ pub struct GetUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserResponse { ///

The user details.

#[serde(rename = "User")] @@ -593,7 +592,7 @@ pub struct GetUserSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserSettingsResponse { ///

The user settings.

#[serde(rename = "UserSettings")] @@ -609,7 +608,7 @@ pub struct GetVoiceConnectorOriginationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVoiceConnectorOriginationResponse { ///

The origination setting details.

#[serde(rename = "Origination")] @@ -625,7 +624,7 @@ pub struct GetVoiceConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVoiceConnectorResponse { ///

The Amazon Chime Voice Connector details.

#[serde(rename = "VoiceConnector")] @@ -641,7 +640,7 @@ pub struct GetVoiceConnectorTerminationHealthRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVoiceConnectorTerminationHealthResponse { ///

The termination health details.

#[serde(rename = "TerminationHealth")] @@ -657,7 +656,7 @@ pub struct GetVoiceConnectorTerminationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVoiceConnectorTerminationResponse { ///

The termination setting details.

#[serde(rename = "Termination")] @@ -667,7 +666,7 @@ pub struct GetVoiceConnectorTerminationResponse { ///

Invitation object returned after emailing users to invite them to join the Amazon Chime Team account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Invite { ///

The email address to which the invite is sent.

#[serde(rename = "EmailAddress")] @@ -698,7 +697,7 @@ pub struct InviteUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InviteUsersResponse { ///

The invite details.

#[serde(rename = "Invites")] @@ -727,7 +726,7 @@ pub struct ListAccountsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAccountsResponse { ///

List of Amazon Chime accounts and account details.

#[serde(rename = "Accounts")] @@ -755,7 +754,7 @@ pub struct ListBotsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBotsResponse { ///

List of bots and bot details.

#[serde(rename = "Bots")] @@ -780,7 +779,7 @@ pub struct ListPhoneNumberOrdersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPhoneNumberOrdersResponse { ///

The token to use to retrieve the next page of results.

#[serde(rename = "NextToken")] @@ -821,7 +820,7 @@ pub struct ListPhoneNumbersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPhoneNumbersResponse { ///

The token to use to retrieve the next page of results.

#[serde(rename = "NextToken")] @@ -853,7 +852,7 @@ pub struct ListUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersResponse { ///

The token to use to retrieve the next page of results.

#[serde(rename = "NextToken")] @@ -873,7 +872,7 @@ pub struct ListVoiceConnectorTerminationCredentialsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListVoiceConnectorTerminationCredentialsResponse { ///

A list of user names.

#[serde(rename = "Usernames")] @@ -894,7 +893,7 @@ pub struct ListVoiceConnectorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListVoiceConnectorsResponse { ///

The token to use to retrieve the next page of results.

#[serde(rename = "NextToken")] @@ -917,12 +916,12 @@ pub struct LogoutUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogoutUserResponse {} ///

A phone number for which an order has been placed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OrderedPhoneNumber { ///

The phone number, in E.164 format.

#[serde(rename = "E164PhoneNumber")] @@ -974,7 +973,7 @@ pub struct OriginationRoute { ///

A phone number used for Amazon Chime Business Calling or an Amazon Chime Voice Connector.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhoneNumber { ///

The phone number associations.

#[serde(rename = "Associations")] @@ -1020,7 +1019,7 @@ pub struct PhoneNumber { ///

The phone number associations, such as Amazon Chime account ID, Amazon Chime user ID, or Amazon Chime Voice Connector ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhoneNumberAssociation { ///

The timestamp of the phone number association, in ISO 8601 format.

#[serde(rename = "AssociatedTimestamp")] @@ -1038,7 +1037,7 @@ pub struct PhoneNumberAssociation { ///

The phone number capabilities, such as enabled inbound and outbound calling and text messaging.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhoneNumberCapabilities { ///

Allows or denies inbound calling for the specified phone number.

#[serde(rename = "InboundCall")] @@ -1068,7 +1067,7 @@ pub struct PhoneNumberCapabilities { ///

If the phone number action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhoneNumberError { ///

The error code.

#[serde(rename = "ErrorCode")] @@ -1086,7 +1085,7 @@ pub struct PhoneNumberError { ///

The details of a phone number order created for Amazon Chime.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhoneNumberOrder { ///

The phone number order creation timestamp, in ISO 8601 format.

#[serde(rename = "CreatedTimestamp")] @@ -1133,7 +1132,7 @@ pub struct PutEventsConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEventsConfigurationResponse { #[serde(rename = "EventsConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1151,7 +1150,7 @@ pub struct PutVoiceConnectorOriginationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutVoiceConnectorOriginationResponse { ///

The updated origination setting details.

#[serde(rename = "Origination")] @@ -1181,7 +1180,7 @@ pub struct PutVoiceConnectorTerminationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutVoiceConnectorTerminationResponse { ///

The updated termination setting details.

#[serde(rename = "Termination")] @@ -1200,7 +1199,7 @@ pub struct RegenerateSecurityTokenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegenerateSecurityTokenResponse { #[serde(rename = "Bot")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1218,7 +1217,7 @@ pub struct ResetPersonalPINRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResetPersonalPINResponse { ///

The user details and new personal meeting PIN.

#[serde(rename = "User")] @@ -1234,7 +1233,7 @@ pub struct RestorePhoneNumberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestorePhoneNumberResponse { ///

The phone number details.

#[serde(rename = "PhoneNumber")] @@ -1275,7 +1274,7 @@ pub struct SearchAvailablePhoneNumbersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchAvailablePhoneNumbersResponse { ///

List of phone numbers, in E.164 format.

#[serde(rename = "E164PhoneNumbers")] @@ -1324,7 +1323,7 @@ pub struct Termination { ///

The termination health details, including the source IP address and timestamp of the last successful SIP OPTIONS message from your SIP infrastructure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminationHealth { ///

The source IP address.

#[serde(rename = "Source")] @@ -1348,7 +1347,7 @@ pub struct UpdateAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAccountResponse { ///

The updated Amazon Chime account details.

#[serde(rename = "Account")] @@ -1367,7 +1366,7 @@ pub struct UpdateAccountSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAccountSettingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1385,7 +1384,7 @@ pub struct UpdateBotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBotResponse { ///

The updated bot details.

#[serde(rename = "Bot")] @@ -1427,7 +1426,7 @@ pub struct UpdatePhoneNumberRequestItem { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePhoneNumberResponse { ///

The updated phone number details.

#[serde(rename = "PhoneNumber")] @@ -1462,7 +1461,7 @@ pub struct UpdateUserRequestItem { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserResponse { ///

The updated user details.

#[serde(rename = "User")] @@ -1497,7 +1496,7 @@ pub struct UpdateVoiceConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateVoiceConnectorResponse { ///

The Amazon Chime Voice Connector details.

#[serde(rename = "VoiceConnector")] @@ -1507,7 +1506,7 @@ pub struct UpdateVoiceConnectorResponse { ///

The user on the Amazon Chime account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct User { ///

The Amazon Chime account ID.

#[serde(rename = "AccountId")] @@ -1556,7 +1555,7 @@ pub struct User { ///

The list of errors returned when errors are encountered during the BatchSuspendUser, BatchUnsuspendUser, or BatchUpdateUser actions. This includes user IDs, error codes, and error messages.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserError { ///

The error code.

#[serde(rename = "ErrorCode")] @@ -1582,7 +1581,7 @@ pub struct UserSettings { ///

The Amazon Chime Voice Connector configuration, including outbound host name and encryption settings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VoiceConnector { ///

The Amazon Chime Voice Connector creation timestamp, in ISO 8601 format.
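The constructor refactor in the next hunk gives ChimeClient, like the other service clients in this diff, a new_with_client constructor, so one preconfigured rusoto_core::Client can be shared across service clients. A minimal sketch, assuming default credentials:

```rust
use rusoto_chime::{Chime, ChimeClient, ListAccountsRequest};
use rusoto_core::{Client, Region};

fn main() {
    // Reuse the process-wide shared Client (dispatcher + credentials).
    let chime = ChimeClient::new_with_client(Client::shared(), Region::UsEast1);
    match chime.list_accounts(ListAccountsRequest::default()).sync() {
        Ok(output) => println!("{:?}", output.accounts),
        Err(error) => eprintln!("ListAccounts failed: {}", error),
    }
}
```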

#[serde(rename = "CreatedTimestamp")] @@ -6299,10 +6298,7 @@ impl ChimeClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ChimeClient { - ChimeClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -6316,10 +6312,14 @@ impl ChimeClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ChimeClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ChimeClient { + ChimeClient { client, region } } } diff --git a/rusoto/services/cloud9/Cargo.toml b/rusoto/services/cloud9/Cargo.toml index ef30d5c4fc5..756de271555 100644 --- a/rusoto/services/cloud9/Cargo.toml +++ b/rusoto/services/cloud9/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloud9" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloud9/README.md b/rusoto/services/cloud9/README.md index af5d73be679..0297b8e2e56 100644 --- a/rusoto/services/cloud9/README.md +++ b/rusoto/services/cloud9/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloud9` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_cloud9 = "0.40.0" +rusoto_cloud9 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cloud9/src/custom/mod.rs b/rusoto/services/cloud9/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloud9/src/custom/mod.rs +++ b/rusoto/services/cloud9/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloud9/src/generated.rs b/rusoto/services/cloud9/src/generated.rs index 89eda8dcfa2..256b8085b7f 100644 --- a/rusoto/services/cloud9/src/generated.rs +++ b/rusoto/services/cloud9/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -55,7 +54,7 @@ pub struct CreateEnvironmentEC2Request { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEnvironmentEC2Result { ///
The ID of the environment that was created.
#[serde(rename = "environmentId")] @@ -77,7 +76,7 @@ pub struct CreateEnvironmentMembershipRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEnvironmentMembershipResult { ///
Information about the environment member that was added.
#[serde(rename = "membership")] @@ -96,7 +95,7 @@ pub struct DeleteEnvironmentMembershipRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEnvironmentMembershipResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -107,7 +106,7 @@ pub struct DeleteEnvironmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEnvironmentResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -135,7 +134,7 @@ pub struct DescribeEnvironmentMembershipsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEnvironmentMembershipsResult { ///
Information about the environment members for the environment.
#[serde(rename = "memberships")] @@ -155,7 +154,7 @@ pub struct DescribeEnvironmentStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEnvironmentStatusResult { ///
Any informational message about the status of the environment.
#[serde(rename = "message")] @@ -175,7 +174,7 @@ pub struct DescribeEnvironmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEnvironmentsResult { ///
Information about the environments that are returned.
#[serde(rename = "environments")] @@ -185,7 +184,7 @@ pub struct DescribeEnvironmentsResult { ///
Information about an AWS Cloud9 development environment.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Environment { ///
The Amazon Resource Name (ARN) of the environment.
#[serde(rename = "arn")] @@ -219,7 +218,7 @@ pub struct Environment { ///
Information about the current creation or deletion lifecycle state of an AWS Cloud9 development environment.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentLifecycle { ///
If the environment failed to delete, the Amazon Resource Name (ARN) of the related AWS resource.
#[serde(rename = "failureResource")] @@ -237,7 +236,7 @@ pub struct EnvironmentLifecycle { ///
Information about an environment member for an AWS Cloud9 development environment.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentMember { ///
The ID of the environment for the environment member.
#[serde(rename = "environmentId")] @@ -274,7 +273,7 @@ pub struct ListEnvironmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEnvironmentsResult { ///
The list of environment identifiers.
#[serde(rename = "environmentIds")] @@ -300,7 +299,7 @@ pub struct UpdateEnvironmentMembershipRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEnvironmentMembershipResult { ///
Information about the environment member whose settings were changed.
#[serde(rename = "membership")] @@ -324,7 +323,7 @@ pub struct UpdateEnvironmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEnvironmentResult {} /// Errors returned by CreateEnvironmentEC2 @@ -1177,10 +1176,7 @@ impl Cloud9Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Cloud9Client { - Cloud9Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1194,10 +1190,14 @@ impl Cloud9Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Cloud9Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Cloud9Client { + Cloud9Client { client, region } } } diff --git a/rusoto/services/clouddirectory/Cargo.toml b/rusoto/services/clouddirectory/Cargo.toml index bdd2fa3c371..6369050e2e6 100644 --- a/rusoto/services/clouddirectory/Cargo.toml +++ b/rusoto/services/clouddirectory/Cargo.toml @@ -1,13 +1,13 @@ [package] authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] -description = "AWS SDK for Rust - Amazon CloudDirectory @ 2016-05-10" +description = "AWS SDK for Rust - Amazon CloudDirectory @ 2017-01-11" documentation = "https://docs.rs/rusoto_clouddirectory" keywords = ["AWS", "Amazon", "clouddirectory"] license = "MIT" name = "rusoto_clouddirectory" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/clouddirectory/README.md b/rusoto/services/clouddirectory/README.md index 0b861e9abb5..38092bc08ec 100644 --- a/rusoto/services/clouddirectory/README.md +++ b/rusoto/services/clouddirectory/README.md @@ -23,9 +23,16 @@ To use `rusoto_clouddirectory` in your application, add it as a dependency in yo ```toml [dependencies] -rusoto_clouddirectory = "0.40.0" +rusoto_clouddirectory = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
diff --git a/rusoto/services/clouddirectory/src/custom/mod.rs b/rusoto/services/clouddirectory/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/clouddirectory/src/custom/mod.rs +++ b/rusoto/services/clouddirectory/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/clouddirectory/src/generated.rs b/rusoto/services/clouddirectory/src/generated.rs index 9925439532d..ec48d79de4a 100644 --- a/rusoto/services/clouddirectory/src/generated.rs +++ b/rusoto/services/clouddirectory/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -42,7 +41,7 @@ pub struct AddFacetToObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddFacetToObjectResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -56,7 +55,7 @@ pub struct ApplySchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplySchemaResponse { ///
The applied schema ARN that is associated with the copied schema in the Directory. You can use this ARN to describe the schema information applied on this directory. For more information, see arns.
#[serde(rename = "AppliedSchemaArn")] @@ -85,7 +84,7 @@ pub struct AttachObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachObjectResponse { ///
The attached ObjectIdentifier, which is the child ObjectIdentifier.
#[serde(rename = "AttachedObjectIdentifier")] @@ -107,7 +106,7 @@ pub struct AttachPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachPolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -124,7 +123,7 @@ pub struct AttachToIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachToIndexResponse { ///
The ObjectIdentifier of the object that was attached to the index.
#[serde(rename = "AttachedObjectIdentifier")] @@ -152,7 +151,7 @@ pub struct AttachTypedLinkRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachTypedLinkResponse { ///
Returns a typed link specifier as output.
#[serde(rename = "TypedLinkSpecifier")] @@ -212,7 +211,7 @@ pub struct BatchAddFacetToObject { ///
The result of a batch add facet to object operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAddFacetToObjectResponse {} ///
Represents the output of an AttachObject operation.
@@ -231,7 +230,7 @@ pub struct BatchAttachObject { ///
Represents the output of a batch AttachObject response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAttachObjectResponse { ///
The ObjectIdentifier of the object that has been attached.
#[serde(rename = "attachedObjectIdentifier")] @@ -252,7 +251,7 @@ pub struct BatchAttachPolicy { ///
Represents the output of an AttachPolicy response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAttachPolicyResponse {} ///
Attaches the specified object to the specified index inside a BatchRead operation. For more information, see AttachToIndex and BatchReadRequest$Operations.
@@ -268,7 +267,7 @@ pub struct BatchAttachToIndex { ///
Represents the output of an AttachToIndex response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAttachToIndexResponse { ///
The ObjectIdentifier of the object that was attached to the index.
#[serde(rename = "AttachedObjectIdentifier")] @@ -295,7 +294,7 @@ pub struct BatchAttachTypedLink { ///
Represents the output of an AttachTypedLink response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAttachTypedLinkResponse { ///
Returns a typed link specifier as output.
#[serde(rename = "TypedLinkSpecifier")] @@ -306,7 +305,7 @@ pub struct BatchAttachTypedLinkResponse { ///
Creates an index object inside of a BatchRead operation. For more information, see CreateIndex and BatchReadRequest$Operations.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchCreateIndex { - ///
The batch reference name. See Batches for more information. + /// The batch reference name. See Transaction Support for more information.
#[serde(rename = "BatchReferenceName")] #[serde(skip_serializing_if = "Option::is_none")] pub batch_reference_name: Option, @@ -328,7 +327,7 @@ pub struct BatchCreateIndex { ///
Represents the output of a CreateIndex response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchCreateIndexResponse { ///
The ObjectIdentifier of the index created by this operation.
#[serde(rename = "ObjectIdentifier")] @@ -339,7 +338,7 @@ pub struct BatchCreateIndexResponse { ///
Represents the output of a CreateObject operation.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchCreateObject { - ///
The batch reference name. See Batches for more information. + /// The batch reference name. See Transaction Support for more information.
#[serde(rename = "BatchReferenceName")] #[serde(skip_serializing_if = "Option::is_none")] pub batch_reference_name: Option, @@ -361,7 +360,7 @@ pub struct BatchCreateObject { ///
Represents the output of a CreateObject response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchCreateObjectResponse { ///
The ID that is associated with the object.
#[serde(rename = "ObjectIdentifier")] @@ -379,7 +378,7 @@ pub struct BatchDeleteObject { ///
Represents the output of a DeleteObject response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteObjectResponse {} ///
Detaches the specified object from the specified index inside a BatchRead operation. For more information, see DetachFromIndex and BatchReadRequest$Operations.
@@ -395,7 +394,7 @@ pub struct BatchDetachFromIndex { ///
Represents the output of a DetachFromIndex response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetachFromIndexResponse { ///
The ObjectIdentifier of the object that was detached from the index.
#[serde(rename = "DetachedObjectIdentifier")] @@ -406,7 +405,7 @@ pub struct BatchDetachFromIndexResponse { ///
Represents the output of a DetachObject operation.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDetachObject { - ///
The batch reference name. See Batches for more information. + /// The batch reference name. See Transaction Support for more information.
#[serde(rename = "BatchReferenceName")] #[serde(skip_serializing_if = "Option::is_none")] pub batch_reference_name: Option, @@ -420,7 +419,7 @@ pub struct BatchDetachObject { ///
Represents the output of a DetachObject response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetachObjectResponse { ///
The ObjectIdentifier of the detached object.
#[serde(rename = "detachedObjectIdentifier")] @@ -441,7 +440,7 @@ pub struct BatchDetachPolicy { ///
Represents the output of a DetachPolicy response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetachPolicyResponse {} ///
Detaches a typed link from a specified source and target object inside a BatchRead operation. For more information, see DetachTypedLink and BatchReadRequest$Operations.
@@ -454,7 +453,7 @@ pub struct BatchDetachTypedLink { ///
Represents the output of a DetachTypedLink response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetachTypedLinkResponse {} ///
Retrieves attributes that are associated with a typed link inside a BatchRead operation. For more information, see GetLinkAttributes and BatchReadRequest$Operations.
@@ -470,7 +469,7 @@ pub struct BatchGetLinkAttributes { ///
Represents the output of a GetLinkAttributes response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetLinkAttributesResponse { ///
The attributes that are associated with the typed link.
#[serde(rename = "Attributes")] @@ -494,7 +493,7 @@ pub struct BatchGetObjectAttributes { ///
Represents the output of a GetObjectAttributes response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetObjectAttributesResponse { ///
The attribute values that are associated with an object.
#[serde(rename = "Attributes")] @@ -512,7 +511,7 @@ pub struct BatchGetObjectInformation { ///
Represents the output of a GetObjectInformation response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetObjectInformationResponse { ///
The ObjectIdentifier of the specified object.
#[serde(rename = "ObjectIdentifier")] @@ -542,7 +541,7 @@ pub struct BatchListAttachedIndices { ///
Represents the output of a ListAttachedIndices response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListAttachedIndicesResponse { ///
The indices attached to the specified object.
#[serde(rename = "IndexAttachments")] @@ -580,7 +579,7 @@ pub struct BatchListIncomingTypedLinks { ///
Represents the output of a ListIncomingTypedLinks response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListIncomingTypedLinksResponse { ///
Returns one or more typed link specifiers as output.
#[serde(rename = "LinkSpecifiers")] @@ -614,7 +613,7 @@ pub struct BatchListIndex { ///
Represents the output of a ListIndex response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListIndexResponse { ///
The objects and indexed values attached to the index.
#[serde(rename = "IndexAttachments")] @@ -648,7 +647,7 @@ pub struct BatchListObjectAttributes { ///
Represents the output of a ListObjectAttributes response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListObjectAttributesResponse { ///
The attributes map that is associated with the object. AttributeArn is the key; attribute value is the value.
#[serde(rename = "Attributes")] @@ -678,7 +677,7 @@ pub struct BatchListObjectChildren { ///
Represents the output of a ListObjectChildren response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListObjectChildrenResponse { ///
The children structure, which is a map with the key as the LinkName and ObjectIdentifier as the value.
#[serde(rename = "Children")] @@ -708,7 +707,7 @@ pub struct BatchListObjectParentPaths { ///
Represents the output of a ListObjectParentPaths response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListObjectParentPathsResponse { ///
The pagination token.
#[serde(rename = "NextToken")] @@ -720,6 +719,29 @@ pub struct BatchListObjectParentPathsResponse { pub path_to_object_identifiers_list: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct BatchListObjectParents { + #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + #[serde(rename = "ObjectReference")] + pub object_reference: ObjectReference, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct BatchListObjectParentsResponse { + #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + #[serde(rename = "ParentLinks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parent_links: Option>, +} + ///
Returns policies attached to an object in pagination fashion inside a BatchRead operation. For more information, see ListObjectPolicies and BatchReadRequest$Operations.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchListObjectPolicies { @@ -738,7 +760,7 @@ pub struct BatchListObjectPolicies { ///
Represents the output of a ListObjectPolicies response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListObjectPoliciesResponse { ///
A list of policy ObjectIdentifiers that are attached to the object.
#[serde(rename = "AttachedPolicyIds")] @@ -776,7 +798,7 @@ pub struct BatchListOutgoingTypedLinks { ///
Represents the output of a ListOutgoingTypedLinks response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListOutgoingTypedLinksResponse { ///
The pagination token.
#[serde(rename = "NextToken")] @@ -806,7 +828,7 @@ pub struct BatchListPolicyAttachments { ///
Represents the output of a ListPolicyAttachments response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchListPolicyAttachmentsResponse { ///
The pagination token.
#[serde(rename = "NextToken")] @@ -836,13 +858,13 @@ pub struct BatchLookupPolicy { ///
Represents the output of a LookupPolicy response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchLookupPolicyResponse { ///
The pagination token.
#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///
Provides list of path to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies. + /// Provides list of path to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.
#[serde(rename = "PolicyToPathList")] #[serde(skip_serializing_if = "Option::is_none")] pub policy_to_path_list: Option>, @@ -850,7 +872,7 @@ pub struct BatchLookupPolicyResponse { ///
The batch read exception structure, which contains the exception type and message.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchReadException { ///
An exception message that is associated with the failure.
#[serde(rename = "Message")] @@ -881,7 +903,7 @@ pub struct BatchReadOperation { #[serde(rename = "ListAttachedIndices")] #[serde(skip_serializing_if = "Option::is_none")] pub list_attached_indices: Option, - ///
Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link. + /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
#[serde(rename = "ListIncomingTypedLinks")] #[serde(skip_serializing_if = "Option::is_none")] pub list_incoming_typed_links: Option, @@ -897,15 +919,18 @@ pub struct BatchReadOperation { #[serde(rename = "ListObjectChildren")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_children: Option, - ///
Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. + /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.
#[serde(rename = "ListObjectParentPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_parent_paths: Option, + #[serde(rename = "ListObjectParents")] + #[serde(skip_serializing_if = "Option::is_none")] + pub list_object_parents: Option, ///
Returns policies attached to an object in pagination fashion.
#[serde(rename = "ListObjectPolicies")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_policies: Option, - ///
Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link. + /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
#[serde(rename = "ListOutgoingTypedLinks")] #[serde(skip_serializing_if = "Option::is_none")] pub list_outgoing_typed_links: Option, @@ -913,7 +938,7 @@ pub struct BatchReadOperation { #[serde(rename = "ListPolicyAttachments")] #[serde(skip_serializing_if = "Option::is_none")] pub list_policy_attachments: Option, - ///
Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies. + /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
#[serde(rename = "LookupPolicy")] #[serde(skip_serializing_if = "Option::is_none")] pub lookup_policy: Option, @@ -921,7 +946,7 @@ pub struct BatchReadOperation { ///
Represents the output of a BatchRead response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchReadOperationResponse { ///
Identifies which operation in a batch has failed.
#[serde(rename = "ExceptionResponse")] @@ -948,7 +973,7 @@ pub struct BatchReadRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchReadResponse { ///
A list of all the responses for each batch read.
#[serde(rename = "Responses")] @@ -958,7 +983,7 @@ pub struct BatchReadResponse { ///
Represents the output of a BatchRead success response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchReadSuccessfulResponse { ///
The list of attributes to retrieve from the typed link.
#[serde(rename = "GetLinkAttributes")] @@ -976,7 +1001,7 @@ pub struct BatchReadSuccessfulResponse { #[serde(rename = "ListAttachedIndices")] #[serde(skip_serializing_if = "Option::is_none")] pub list_attached_indices: Option, - ///
Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link. + /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
#[serde(rename = "ListIncomingTypedLinks")] #[serde(skip_serializing_if = "Option::is_none")] pub list_incoming_typed_links: Option, @@ -992,15 +1017,18 @@ pub struct BatchReadSuccessfulResponse { #[serde(rename = "ListObjectChildren")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_children: Option, - ///
Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. + /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.
#[serde(rename = "ListObjectParentPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_parent_paths: Option, + #[serde(rename = "ListObjectParents")] + #[serde(skip_serializing_if = "Option::is_none")] + pub list_object_parents: Option, ///
Returns policies attached to an object in pagination fashion.
#[serde(rename = "ListObjectPolicies")] #[serde(skip_serializing_if = "Option::is_none")] pub list_object_policies: Option, - ///
Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link. + /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
#[serde(rename = "ListOutgoingTypedLinks")] #[serde(skip_serializing_if = "Option::is_none")] pub list_outgoing_typed_links: Option, @@ -1008,7 +1036,7 @@ pub struct BatchReadSuccessfulResponse { #[serde(rename = "ListPolicyAttachments")] #[serde(skip_serializing_if = "Option::is_none")] pub list_policy_attachments: Option, - ///
Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies. + /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
#[serde(rename = "LookupPolicy")] #[serde(skip_serializing_if = "Option::is_none")] pub lookup_policy: Option, @@ -1027,7 +1055,7 @@ pub struct BatchRemoveFacetFromObject { ///
An empty result that represents success.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchRemoveFacetFromObjectResponse {} ///
Updates a given typed link’s attributes inside a BatchRead operation. Attributes to be updated must not contribute to the typed link’s identity, as defined by its IdentityAttributeOrder. For more information, see UpdateLinkAttributes and BatchReadRequest$Operations.
@@ -1043,7 +1071,7 @@ pub struct BatchUpdateLinkAttributes { ///
Represents the output of an UpdateLinkAttributes response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUpdateLinkAttributesResponse {} ///
Represents the output of a BatchUpdate operation.
@@ -1059,7 +1087,7 @@ pub struct BatchUpdateObjectAttributes { ///
Represents the output of a BatchUpdate response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUpdateObjectAttributesResponse { ///
ID that is associated with the object.
#[serde(rename = "ObjectIdentifier")] @@ -1086,11 +1114,11 @@ pub struct BatchWriteOperation { #[serde(rename = "AttachToIndex")] #[serde(skip_serializing_if = "Option::is_none")] pub attach_to_index: Option, - ///
Attaches a typed link to a specified source and target object. For more information, see Typed link. + /// Attaches a typed link to a specified source and target object. For more information, see Typed Links.
#[serde(rename = "AttachTypedLink")] #[serde(skip_serializing_if = "Option::is_none")] pub attach_typed_link: Option, - ///
Creates an index object. See Indexing for more information. + /// Creates an index object. See Indexing and search for more information.
#[serde(rename = "CreateIndex")] #[serde(skip_serializing_if = "Option::is_none")] pub create_index: Option, @@ -1114,7 +1142,7 @@ pub struct BatchWriteOperation { #[serde(rename = "DetachPolicy")] #[serde(skip_serializing_if = "Option::is_none")] pub detach_policy: Option, - ///
Detaches a typed link from a specified source and target object. For more information, see Typed link. + /// Detaches a typed link from a specified source and target object. For more information, see Typed Links.
#[serde(rename = "DetachTypedLink")] #[serde(skip_serializing_if = "Option::is_none")] pub detach_typed_link: Option, @@ -1134,7 +1162,7 @@ pub struct BatchWriteOperation { ///
Represents the output of a BatchWrite response operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchWriteOperationResponse { ///
The result of an add facet to object batch operation.
#[serde(rename = "AddFacetToObject")] @@ -1152,11 +1180,11 @@ pub struct BatchWriteOperationResponse { #[serde(rename = "AttachToIndex")] #[serde(skip_serializing_if = "Option::is_none")] pub attach_to_index: Option, - ///
Attaches a typed link to a specified source and target object. For more information, see Typed link. + /// Attaches a typed link to a specified source and target object. For more information, see Typed Links.
#[serde(rename = "AttachTypedLink")] #[serde(skip_serializing_if = "Option::is_none")] pub attach_typed_link: Option, - ///
Creates an index object. See Indexing for more information. + /// Creates an index object. See Indexing and search for more information.
#[serde(rename = "CreateIndex")] #[serde(skip_serializing_if = "Option::is_none")] pub create_index: Option, @@ -1180,7 +1208,7 @@ pub struct BatchWriteOperationResponse { #[serde(rename = "DetachPolicy")] #[serde(skip_serializing_if = "Option::is_none")] pub detach_policy: Option, - ///
Detaches a typed link from a specified source and target object. For more information, see Typed link. + /// Detaches a typed link from a specified source and target object. For more information, see Typed Links.
#[serde(rename = "DetachTypedLink")] #[serde(skip_serializing_if = "Option::is_none")] pub detach_typed_link: Option, @@ -1209,7 +1237,7 @@ pub struct BatchWriteRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchWriteResponse { ///
A list of all the responses for each batch write.
#[serde(rename = "Responses")] @@ -1228,7 +1256,7 @@ pub struct CreateDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectoryResponse { ///
The ARN of the published schema in the Directory. Once a published schema is copied into the directory, it has its own ARN, which is referred to as the applied schema ARN. For more information, see arns.
#[serde(rename = "AppliedSchemaArn")] @@ -1250,19 +1278,24 @@ pub struct CreateFacetRequest { #[serde(rename = "Attributes")] #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option>, + ///
There are two different styles that you can define on any given facet, Static and Dynamic. For static facets, all attributes must be defined in the schema. For dynamic facets, attributes can be defined during data plane operations.
+ #[serde(rename = "FacetStyle")] + #[serde(skip_serializing_if = "Option::is_none")] + pub facet_style: Option, ///
The name of the Facet, which is unique for a given schema.
#[serde(rename = "Name")] pub name: String, - ///
Specifies whether a given object created from this facet is of type node, leaf node, policy or index. • Node: Can have multiple children but one parent. • Leaf node: Cannot have children but can have multiple parents. • Policy: Allows you to store a policy document and policy type. For more information, see Policies. • Index: Can be created with the Index API. + /// Specifies whether a given object created from this facet is of type node, leaf node, policy or index. • Node: Can have multiple children but one parent. • Leaf node: Cannot have children but can have multiple parents. • Policy: Allows you to store a policy document and policy type. For more information, see Policies. • Index: Can be created with the Index API.
#[serde(rename = "ObjectType")] - pub object_type: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub object_type: Option, ///
The schema ARN in which the new Facet will be created. For more information, see arns.
#[serde(rename = "SchemaArn")] pub schema_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1287,7 +1320,7 @@ pub struct CreateIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIndexResponse { ///
The ObjectIdentifier of the index created by this operation.
#[serde(rename = "ObjectIdentifier")] @@ -1318,7 +1351,7 @@ pub struct CreateObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateObjectResponse { ///
The identifier that is associated with the object.
#[serde(rename = "ObjectIdentifier")] @@ -1334,7 +1367,7 @@ pub struct CreateSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSchemaResponse { ///
The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.
#[serde(rename = "SchemaArn")] @@ -1353,7 +1386,7 @@ pub struct CreateTypedLinkFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTypedLinkFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1364,7 +1397,7 @@ pub struct DeleteDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectoryResponse { ///
The ARN of the deleted directory.
#[serde(rename = "DirectoryArn")] @@ -1382,7 +1415,7 @@ pub struct DeleteFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1396,7 +1429,7 @@ pub struct DeleteObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteObjectResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1407,7 +1440,7 @@ pub struct DeleteSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSchemaResponse { ///
The input ARN that is returned as part of the response. For more information, see arns.
#[serde(rename = "SchemaArn")] @@ -1426,7 +1459,7 @@ pub struct DeleteTypedLinkFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTypedLinkFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1443,7 +1476,7 @@ pub struct DetachFromIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachFromIndexResponse { ///
The ObjectIdentifier of the object that was detached from the index.
#[serde(rename = "DetachedObjectIdentifier")] @@ -1465,7 +1498,7 @@ pub struct DetachObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachObjectResponse { ///
The ObjectIdentifier that was detached from the object.
#[serde(rename = "DetachedObjectIdentifier")] @@ -1487,7 +1520,7 @@ pub struct DetachPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachPolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1502,7 +1535,7 @@ pub struct DetachTypedLinkRequest { ///
Directory structure that includes the directory name and directory ARN.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Directory { ///
The date and time when the directory was created.
#[serde(rename = "CreationDateTime")] @@ -1530,7 +1563,7 @@ pub struct DisableDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableDirectoryResponse { ///
The ARN of the directory that has been disabled.
#[serde(rename = "DirectoryArn")] @@ -1545,17 +1578,21 @@ pub struct EnableDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableDirectoryResponse { ///
The ARN of the enabled directory.
#[serde(rename = "DirectoryArn")] pub directory_arn: String, } -///
A structure that contains Name, ARN, Attributes, Rules, and ObjectTypes. See Facets for more information. +/// A structure that contains Name, ARN, Attributes, Rules, and ObjectTypes. See Facets for more information.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Facet { + ///
There are two different styles that you can define on any given facet, Static and Dynamic. For static facets, all attributes must be defined in the schema. For dynamic facets, attributes can be defined during data plane operations.
+ #[serde(rename = "FacetStyle")] + #[serde(skip_serializing_if = "Option::is_none")] + pub facet_style: Option, ///
The name of the Facet.
#[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1569,11 +1606,11 @@ pub struct Facet { ///
An attribute that is associated with the Facet.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct FacetAttribute { - ///
A facet attribute consists of either a definition or a reference. This structure contains the attribute definition. See Attribute References for more information. + /// A facet attribute consists of either a definition or a reference. This structure contains the attribute definition. See Attribute References for more information.
#[serde(rename = "AttributeDefinition")] #[serde(skip_serializing_if = "Option::is_none")] pub attribute_definition: Option, - ///
An attribute reference that is associated with the attribute. See Attribute References for more information. + /// An attribute reference that is associated with the attribute. See Attribute References for more information.
#[serde(rename = "AttributeReference")] #[serde(skip_serializing_if = "Option::is_none")] pub attribute_reference: Option, @@ -1586,7 +1623,7 @@ pub struct FacetAttribute { pub required_behavior: Option, } -///
A facet attribute definition. See Attribute References for more information. +/// A facet attribute definition. See Attribute References for more information.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct FacetAttributeDefinition { ///
The default value of the attribute (if configured).
@@ -1609,10 +1646,10 @@ pub struct FacetAttributeDefinition { ///
The facet attribute reference that specifies the attribute definition that contains the attribute facet name and attribute name.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct FacetAttributeReference { - ///
The target attribute name that is associated with the facet reference. See Attribute References for more information. + /// The target attribute name that is associated with the facet reference. See Attribute References for more information.
#[serde(rename = "TargetAttributeName")] pub target_attribute_name: String, - ///
The target facet name that is associated with the facet reference. See Attribute References for more information. + /// The target facet name that is associated with the facet reference. See Attribute References for more information.
#[serde(rename = "TargetFacetName")] pub target_facet_name: String, } @@ -1638,7 +1675,7 @@ pub struct GetAppliedSchemaVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAppliedSchemaVersionResponse { ///
Current applied schema ARN, including the minor version in use if one was provided.
#[serde(rename = "AppliedSchemaArn")] @@ -1654,7 +1691,7 @@ pub struct GetDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDirectoryResponse { ///
Metadata about the directory.
#[serde(rename = "Directory")] @@ -1672,7 +1709,7 @@ pub struct GetFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFacetResponse { ///
The Facet structure that is associated with the facet.
#[serde(rename = "Facet")] @@ -1689,7 +1726,7 @@ pub struct GetLinkAttributesRequest { #[serde(rename = "ConsistencyLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub consistency_level: Option, - ///
The Amazon Resource Name (ARN) that is associated with the Directory where the typed link resides. For more information, see arns or Typed link. + /// The Amazon Resource Name (ARN) that is associated with the Directory where the typed link resides. For more information, see arns or Typed Links.
#[serde(rename = "DirectoryArn")] pub directory_arn: String, ///
Allows a typed link specifier to be accepted as input.
@@ -1698,7 +1735,7 @@ pub struct GetLinkAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLinkAttributesResponse { ///
The attributes that are associated with the typed link.
#[serde(rename = "Attributes")] @@ -1727,7 +1764,7 @@ pub struct GetObjectAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetObjectAttributesResponse { ///
The attributes that are associated with the object.
#[serde(rename = "Attributes")] @@ -1750,7 +1787,7 @@ pub struct GetObjectInformationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetObjectInformationResponse { ///
The ObjectIdentifier of the specified object.
#[serde(rename = "ObjectIdentifier")] @@ -1770,7 +1807,7 @@ pub struct GetSchemaAsJsonRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSchemaAsJsonResponse { ///
The JSON representation of the schema document.
#[serde(rename = "Document")] @@ -1793,9 +1830,9 @@ pub struct GetTypedLinkFacetInformationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTypedLinkFacetInformationResponse { - ///
The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link. + /// The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed Links.
#[serde(rename = "IdentityAttributeOrder")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_attribute_order: Option>, @@ -1803,7 +1840,7 @@ pub struct GetTypedLinkFacetInformationResponse { ///
Represents an index and an attached object.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IndexAttachment { ///
The indexed attribute values.
#[serde(rename = "IndexedAttributes")] @@ -1861,7 +1898,7 @@ pub struct ListAppliedSchemaArnsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAppliedSchemaArnsResponse { ///
The pagination token.
#[serde(rename = "NextToken")] @@ -1896,7 +1933,7 @@ pub struct ListAttachedIndicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAttachedIndicesResponse { ///
The indices attached to the specified object.
#[serde(rename = "IndexAttachments")] @@ -1921,7 +1958,7 @@ pub struct ListDevelopmentSchemaArnsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevelopmentSchemaArnsResponse { ///
The pagination token.
#[serde(rename = "NextToken")] @@ -1950,7 +1987,7 @@ pub struct ListDirectoriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDirectoriesResponse { ///
Lists all directories that are associated with your account in pagination fashion.
#[serde(rename = "Directories")] @@ -1980,7 +2017,7 @@ pub struct ListFacetAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFacetAttributesResponse { ///
The attributes attached to the facet.
#[serde(rename = "Attributes")] @@ -2008,7 +2045,7 @@ pub struct ListFacetNamesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFacetNamesResponse { ///
The names of facets that exist within the schema.
#[serde(rename = "FacetNames")] @@ -2051,7 +2088,7 @@ pub struct ListIncomingTypedLinksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIncomingTypedLinksResponse { ///

Returns one or more typed link specifiers as output.

#[serde(rename = "LinkSpecifiers")] @@ -2075,7 +2112,7 @@ pub struct ListIndexRequest { ///

The reference to the index to list.

#[serde(rename = "IndexReference")] pub index_reference: ObjectReference, - ///

The maximum number of objects in a single page to retrieve from the index during a request. For more information, see AWS Directory Service Limits.

+ ///

The maximum number of objects in a single page to retrieve from the index during a request. For more information, see Amazon Cloud Directory Limits.

#[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option, @@ -2090,7 +2127,7 @@ pub struct ListIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIndexResponse { ///

The objects and indexed values attached to the index.

#[serde(rename = "IndexAttachments")] @@ -2102,6 +2139,35 @@ pub struct ListIndexResponse { pub next_token: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListManagedSchemaArnsRequest { + ///

The maximum number of results to retrieve.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

The pagination token.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The response for ListManagedSchemaArns. When this parameter is used, all minor version ARNs for a major version are listed.

+ #[serde(rename = "SchemaArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_arn: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListManagedSchemaArnsResponse { + ///

The pagination token.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The ARNs for all AWS managed schemas.

+ #[serde(rename = "SchemaArns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_arns: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListObjectAttributesRequest { ///

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
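Returning to the new ListManagedSchemaArns pair above: NextToken drives pagination, as in the other List operations. A hedged sketch of draining all pages, assuming the generated trait method follows the usual list_managed_schema_arns naming, that client is a CloudDirectoryClient, and that the blocking .sync() call runs inside a function whose error type can absorb RusotoError:

// Loop until the service stops handing back a pagination token.
let mut next_token: Option<String> = None;
loop {
    let response = client
        .list_managed_schema_arns(ListManagedSchemaArnsRequest {
            next_token: next_token.clone(),
            ..Default::default()
        })
        .sync()?;
    for arn in response.schema_arns.unwrap_or_default() {
        println!("managed schema: {}", arn);
    }
    next_token = response.next_token;
    if next_token.is_none() {
        break;
    }
}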

@@ -2129,7 +2195,7 @@ pub struct ListObjectAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListObjectAttributesResponse { ///

Attributes map that is associated with the object. AttributeArn is the key, and attribute value is the value.

#[serde(rename = "Attributes")] @@ -2164,7 +2230,7 @@ pub struct ListObjectChildrenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListObjectChildrenResponse { ///

Children structure, which is a map with key as the LinkName and ObjectIdentifier as the value.

#[serde(rename = "Children")] @@ -2195,7 +2261,7 @@ pub struct ListObjectParentPathsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListObjectParentPathsResponse { ///

The pagination token.

#[serde(rename = "NextToken")] @@ -2216,6 +2282,10 @@ pub struct ListObjectParentsRequest { ///

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

#[serde(rename = "DirectoryArn")] pub directory_arn: String, + ///

When set to True, returns all ListObjectParentsResponse$ParentLinks. There could be multiple links between a parent-child pair.

+ #[serde(rename = "IncludeAllLinksToEachParent")] + #[serde(skip_serializing_if = "Option::is_none")] + pub include_all_links_to_each_parent: Option, ///

The maximum number of items to be retrieved in a single call. This is an approximate number.

#[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2230,12 +2300,16 @@ pub struct ListObjectParentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListObjectParentsResponse { ///

The pagination token.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, + ///

Returns a list of parent reference and LinkName tuples.

+ #[serde(rename = "ParentLinks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parent_links: Option>, ///

The parent structure, which is a map with key as the ObjectIdentifier and LinkName as the value.
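The new IncludeAllLinksToEachParent flag and ParentLinks field above work as a pair. A hypothetical sketch of requesting every link to each parent instead of the single-entry Parents map; the directory ARN and object path are placeholders, and the request struct is assumed to derive Default like the other generated request structs:

// Ask for all parent links, then walk the new ParentLinks list.
let request = ListObjectParentsRequest {
    directory_arn: "arn:aws:clouddirectory:...".to_string(), // placeholder ARN
    object_reference: ObjectReference {
        selector: Some("/some/path".to_string()),
    },
    include_all_links_to_each_parent: Some(true),
    ..Default::default()
};
let response = client.list_object_parents(request).sync()?;
for link in response.parent_links.unwrap_or_default() {
    println!("{:?} -> {:?}", link.object_identifier, link.link_name);
}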

#[serde(rename = "Parents")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2265,7 +2339,7 @@ pub struct ListObjectPoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListObjectPoliciesResponse { ///

A list of policy ObjectIdentifiers, that are attached to the object.

#[serde(rename = "AttachedPolicyIds")] @@ -2308,7 +2382,7 @@ pub struct ListOutgoingTypedLinksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOutgoingTypedLinksResponse { ///

The pagination token.

#[serde(rename = "NextToken")] @@ -2343,7 +2417,7 @@ pub struct ListPolicyAttachmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPolicyAttachmentsResponse { ///

The pagination token.

#[serde(rename = "NextToken")] @@ -2372,7 +2446,7 @@ pub struct ListPublishedSchemaArnsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPublishedSchemaArnsResponse { ///

The pagination token.

#[serde(rename = "NextToken")] @@ -2400,7 +2474,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "NextToken")] @@ -2431,7 +2505,7 @@ pub struct ListTypedLinkFacetAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTypedLinkFacetAttributesResponse { ///

An ordered set of attributes associated with the typed link.

#[serde(rename = "Attributes")] @@ -2459,7 +2533,7 @@ pub struct ListTypedLinkFacetNamesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTypedLinkFacetNamesResponse { ///

The names of typed link facets that exist within the schema.

#[serde(rename = "FacetNames")] @@ -2490,13 +2564,13 @@ pub struct LookupPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LookupPolicyResponse { ///

The pagination token.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

+ ///

Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.
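A short sketch of consuming that list; response is assumed to be a LookupPolicyResponse, and the policies field on PolicyToPath is assumed to follow the generated shape of Option<Vec<PolicyAttachment>>:

// Walk each path from the root and the policies attached along it.
if let Some(paths) = response.policy_to_path_list {
    for policy_to_path in paths {
        println!("path from root: {:?}", policy_to_path.path);
        for attachment in policy_to_path.policies.unwrap_or_default() {
            println!(
                "  policy {:?} ({:?}) attached at {:?}",
                attachment.policy_id, attachment.policy_type, attachment.object_identifier
            );
        }
    }
}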

#[serde(rename = "PolicyToPathList")] #[serde(skip_serializing_if = "Option::is_none")] pub policy_to_path_list: Option>, @@ -2541,10 +2615,24 @@ pub struct ObjectAttributeUpdate { pub object_attribute_key: Option, } +///

A pair of ObjectIdentifier and LinkName.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ObjectIdentifierAndLinkNameTuple { + ///

The name of the link between the parent and the child object.

+ #[serde(rename = "LinkName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub link_name: Option, + ///

The ID that is associated with the object.

+ #[serde(rename = "ObjectIdentifier")] + #[serde(skip_serializing_if = "Option::is_none")] + pub object_identifier: Option, +} + ///

The reference that identifies an object.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ObjectReference { - ///

A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:

  • $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object’s identifier is immutable and no two objects will ever share the same object identifier

  • /some/path - Identifies the object based on path

  • #SomeBatchReference - Identifies the object in a batch call

+ ///

A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Access Objects. You can identify an object in one of the following ways:

  • $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object’s identifier is immutable and no two objects will ever share the same object identifier

  • /some/path - Identifies the object based on path

  • #SomeBatchReference - Identifies the object in a batch call
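All three addressing modes flow through the single Selector field; a brief sketch (the object identifier value is a placeholder):

let by_identifier = ObjectReference {
    selector: Some("$AQGG2mFhbGNpZXIK".to_string()), // placeholder identifier
};
let by_path = ObjectReference {
    selector: Some("/some/path".to_string()),
};
let by_batch_ref = ObjectReference {
    selector: Some("#SomeBatchReference".to_string()),
};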

#[serde(rename = "Selector")] #[serde(skip_serializing_if = "Option::is_none")] pub selector: Option, @@ -2552,7 +2640,7 @@ pub struct ObjectReference { ///

Returns the path to the ObjectIdentifiers that is associated with the directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PathToObjectIdentifiers { ///

Lists ObjectIdentifiers starting from directory root to the object in the request.

#[serde(rename = "ObjectIdentifiers")] @@ -2564,9 +2652,9 @@ pub struct PathToObjectIdentifiers { pub path: Option, } -///

Contains the PolicyType, PolicyId, and the ObjectIdentifier to which it is attached. For more information, see Policies.

+///

Contains the PolicyType, PolicyId, and the ObjectIdentifier to which it is attached. For more information, see Policies.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyAttachment { ///

The ObjectIdentifier that is associated with PolicyAttachment.

#[serde(rename = "ObjectIdentifier")] @@ -2584,7 +2672,7 @@ pub struct PolicyAttachment { ///

Used when a regular object exists in a Directory and you want to find all of the policies that are associated with that object and the parent to that object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyToPath { ///

The path that is referenced from the root.

#[serde(rename = "Path")] @@ -2615,7 +2703,7 @@ pub struct PublishSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PublishSchemaResponse { ///

The ARN that is associated with the published schema. For more information, see arns.

#[serde(rename = "PublishedSchemaArn")] @@ -2634,7 +2722,7 @@ pub struct PutSchemaFromJsonRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutSchemaFromJsonResponse { ///

The ARN of the schema to update.

#[serde(rename = "Arn")] @@ -2656,7 +2744,7 @@ pub struct RemoveFacetFromObjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveFacetFromObjectResponse {} ///

Contains an Amazon Resource Name (ARN) and parameters that are associated with the rule.

@@ -2679,7 +2767,7 @@ pub struct SchemaFacet { #[serde(rename = "FacetName")] #[serde(skip_serializing_if = "Option::is_none")] pub facet_name: Option, - ///

The ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.

+ ///

The ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.
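Since the ARN here carries no minor version component, a facet reference stays valid across in-place minor-version upgrades. A small sketch, with placeholder values:

// Identify a facet by major-version schema ARN plus facet name.
let schema_facet = SchemaFacet {
    facet_name: Some("Employee".to_string()),
    schema_arn: Some("arn:aws:clouddirectory:...:schema/applied/OrgSchema/1".to_string()), // placeholder
};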

#[serde(rename = "SchemaArn")] #[serde(skip_serializing_if = "Option::is_none")] pub schema_arn: Option, @@ -2709,7 +2797,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Represents the data for a typed attribute. You can set one, and only one, of the elements. Each attribute in an item is a name-value pair. Attributes have a single value.

@@ -2742,7 +2830,7 @@ pub struct TypedAttributeValue { pub string_value: Option, } -///

A range of attribute values. For more information, see Range Filters.

+///

A range of attribute values. For more information, see Range Filters.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TypedAttributeValueRange { ///

The inclusive or exclusive range end.
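A hypothetical open-ended range, assuming the struct mirrors the end fields shown here with start_mode/start_value counterparts and that TypedAttributeValue derives Default like its neighbors:

// Everything from "m" (inclusive) up to the last possible value; an
// inexact range like this must come last when filtering typed links.
let range = TypedAttributeValueRange {
    start_mode: "INCLUSIVE".to_string(),
    start_value: Some(TypedAttributeValue {
        string_value: Some("m".to_string()),
        ..Default::default()
    }),
    end_mode: "LAST".to_string(),
    end_value: None,
};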

@@ -2863,7 +2951,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2885,7 +2973,7 @@ pub struct UpdateFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2893,7 +2981,7 @@ pub struct UpdateLinkAttributesRequest { ///

The attributes update structure.

#[serde(rename = "AttributeUpdates")] pub attribute_updates: Vec, - ///

The Amazon Resource Name (ARN) that is associated with the Directory where the updated typed link resides. For more information, see arns or Typed link.

+ ///

The Amazon Resource Name (ARN) that is associated with the Directory where the updated typed link resides. For more information, see arns or Typed Links.

#[serde(rename = "DirectoryArn")] pub directory_arn: String, ///

Allows a typed link specifier to be accepted as input.

@@ -2902,7 +2990,7 @@ pub struct UpdateLinkAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateLinkAttributesResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2919,7 +3007,7 @@ pub struct UpdateObjectAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateObjectAttributesResponse { ///

The ObjectIdentifier of the updated object.

#[serde(rename = "ObjectIdentifier")] @@ -2938,7 +3026,7 @@ pub struct UpdateSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSchemaResponse { ///

The ARN that is associated with the updated schema. For more information, see arns.

#[serde(rename = "SchemaArn")] @@ -2951,7 +3039,7 @@ pub struct UpdateTypedLinkFacetRequest { ///

Attributes update structure.

#[serde(rename = "AttributeUpdates")] pub attribute_updates: Vec, - ///

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to a typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link.

+ ///

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to a typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed Links.

#[serde(rename = "IdentityAttributeOrder")] pub identity_attribute_order: Vec, ///

The unique name of the typed link facet.

@@ -2963,7 +3051,7 @@ pub struct UpdateTypedLinkFacetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTypedLinkFacetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2981,7 +3069,7 @@ pub struct UpgradeAppliedSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpgradeAppliedSchemaResponse { ///

The ARN of the directory that is returned as part of the response.

#[serde(rename = "DirectoryArn")] @@ -3011,7 +3099,7 @@ pub struct UpgradePublishedSchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpgradePublishedSchemaResponse { ///

The ARN of the upgraded schema that is returned as part of the response.

#[serde(rename = "UpgradedSchemaArn")] @@ -3032,7 +3120,7 @@ pub enum AddFacetToObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -3105,14 +3193,16 @@ pub enum ApplySchemaError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.

+ ///

Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.

InvalidAttachment(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), + ///

Indicates that a schema could not be created due to a naming conflict. Please select a different name and then try again.
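Taken together, the RetryableConflict and new SchemaAlreadyExists variants suggest the following hedged error-handling sketch around apply_schema; the client, the request, and the backoff constants are illustrative, not part of this diff:

use std::{thread, time::Duration};

// Retry RetryableConflict with exponential backoff; treat the new
// SchemaAlreadyExists variant as a hard failure that needs a rename.
let mut delay = Duration::from_millis(100);
for attempt in 0..5 {
    match client.apply_schema(request.clone()).sync() {
        Ok(response) => {
            println!("applied: {:?}", response.applied_schema_arn);
            break;
        }
        Err(RusotoError::Service(ApplySchemaError::RetryableConflict(msg))) => {
            eprintln!("conflict on attempt {}: {}; backing off", attempt, msg);
            thread::sleep(delay);
            delay *= 2;
        }
        Err(RusotoError::Service(ApplySchemaError::SchemaAlreadyExists(msg))) => {
            eprintln!("schema name already taken: {}", msg);
            break;
        }
        Err(other) => {
            eprintln!("apply_schema failed: {}", other);
            break;
        }
    }
}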

+ SchemaAlreadyExists(String), } impl ApplySchemaError { @@ -3140,6 +3230,9 @@ impl ApplySchemaError { "RetryableConflictException" => { return RusotoError::Service(ApplySchemaError::RetryableConflict(err.msg)) } + "SchemaAlreadyExistsException" => { + return RusotoError::Service(ApplySchemaError::SchemaAlreadyExists(err.msg)) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3162,6 +3255,7 @@ impl Error for ApplySchemaError { ApplySchemaError::LimitExceeded(ref cause) => cause, ApplySchemaError::ResourceNotFound(ref cause) => cause, ApplySchemaError::RetryableConflict(ref cause) => cause, + ApplySchemaError::SchemaAlreadyExists(ref cause) => cause, } } } @@ -3178,9 +3272,9 @@ pub enum AttachObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.

+ ///

Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.

InvalidAttachment(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

LinkNameAlreadyInUse(String), @@ -3263,7 +3357,7 @@ pub enum AttachPolicyError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation can only operate on policy objects.

NotPolicy(String), @@ -3340,9 +3434,9 @@ pub enum AttachToIndexError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.

+ ///

Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.

InvalidAttachment(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

LinkNameAlreadyInUse(String), @@ -3435,9 +3529,9 @@ pub enum AttachTypedLinkError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.

+ ///

Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.

InvalidAttachment(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -3514,7 +3608,7 @@ pub enum BatchReadError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -3579,7 +3673,7 @@ pub enum BatchWriteError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -3646,7 +3740,7 @@ pub enum CreateDirectoryError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -3721,7 +3815,7 @@ pub enum CreateFacetError { InvalidArn(String), ///

Occurs when any of the rule parameter keys or values are invalid.

InvalidRule(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -3800,7 +3894,7 @@ pub enum CreateIndexError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

LinkNameAlreadyInUse(String), @@ -3887,7 +3981,7 @@ pub enum CreateObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

LinkNameAlreadyInUse(String), @@ -3970,7 +4064,7 @@ pub enum CreateSchemaError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -4039,7 +4133,7 @@ pub enum CreateTypedLinkFacetError { InvalidArn(String), ///

Occurs when any of the rule parameter keys or values are invalid.

InvalidRule(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4128,7 +4222,7 @@ pub enum DeleteDirectoryError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4205,7 +4299,7 @@ pub enum DeleteFacetError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4278,7 +4372,7 @@ pub enum DeleteObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation cannot be completed because the object has not been detached from the tree.

ObjectNotDetached(String), @@ -4351,7 +4445,7 @@ pub enum DeleteSchemaError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4422,7 +4516,7 @@ pub enum DeleteTypedLinkFacetError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4497,7 +4591,7 @@ pub enum DetachFromIndexError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation can only operate on index objects.

NotIndex(String), @@ -4580,7 +4674,7 @@ pub enum DetachObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when any invalid operations are performed on an object that is not a node, such as calling ListObjectChildren for a leaf node object.

NotNode(String), @@ -4655,7 +4749,7 @@ pub enum DetachPolicyError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation can only operate on policy objects.

NotPolicy(String), @@ -4732,7 +4826,7 @@ pub enum DetachTypedLinkError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4805,7 +4899,7 @@ pub enum DisableDirectoryError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4874,7 +4968,7 @@ pub enum EnableDirectoryError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -4941,7 +5035,7 @@ pub enum GetAppliedSchemaVersionError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5014,7 +5108,7 @@ pub enum GetDirectoryError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -5073,7 +5167,7 @@ pub enum GetFacetError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5144,7 +5238,7 @@ pub enum GetLinkAttributesError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5221,7 +5315,7 @@ pub enum GetObjectAttributesError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5300,7 +5394,7 @@ pub enum GetObjectInformationError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5375,7 +5469,7 @@ pub enum GetSchemaAsJsonError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5442,7 +5536,7 @@ pub enum GetTypedLinkFacetInformationError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5533,7 +5627,7 @@ pub enum ListAppliedSchemaArnsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5610,7 +5704,7 @@ pub enum ListAttachedIndicesError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5685,7 +5779,7 @@ pub enum ListDevelopmentSchemaArnsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5768,7 +5862,7 @@ pub enum ListDirectoriesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -5833,7 +5927,7 @@ pub enum ListFacetAttributesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5912,7 +6006,7 @@ pub enum ListFacetNamesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -5985,7 +6079,7 @@ pub enum ListIncomingTypedLinksError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6080,7 +6174,7 @@ pub enum ListIndexError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation can only operate on index objects.

     NotIndex(String),
@@ -6152,6 +6246,69 @@ impl Error for ListIndexError {
         }
     }
 }
+/// Errors returned by ListManagedSchemaArns
+#[derive(Debug, PartialEq)]
+pub enum ListManagedSchemaArnsError {
+    /// Access denied. Check your permissions.
+    AccessDenied(String),
+    /// Indicates a problem that must be resolved by Amazon Web Services. This might be a transient error in which case you can retry your request until it succeeds. Otherwise, go to the AWS Service Health Dashboard site to see if there are any operational issues with the service.
+    InternalService(String),
+    /// Indicates that the provided ARN value is not valid.
+    InvalidArn(String),
+    /// Indicates that the NextToken value is not valid.
+    InvalidNextToken(String),
+    /// The specified resource could not be found.
+    ResourceNotFound(String),
+}
+
+impl ListManagedSchemaArnsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListManagedSchemaArnsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(ListManagedSchemaArnsError::AccessDenied(err.msg))
+                }
+                "InternalServiceException" => {
+                    return RusotoError::Service(ListManagedSchemaArnsError::InternalService(
+                        err.msg,
+                    ))
+                }
+                "InvalidArnException" => {
+                    return RusotoError::Service(ListManagedSchemaArnsError::InvalidArn(err.msg))
+                }
+                "InvalidNextTokenException" => {
+                    return RusotoError::Service(ListManagedSchemaArnsError::InvalidNextToken(
+                        err.msg,
+                    ))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(ListManagedSchemaArnsError::ResourceNotFound(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListManagedSchemaArnsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListManagedSchemaArnsError {
+    fn description(&self) -> &str {
+        match *self {
+            ListManagedSchemaArnsError::AccessDenied(ref cause) => cause,
+            ListManagedSchemaArnsError::InternalService(ref cause) => cause,
+            ListManagedSchemaArnsError::InvalidArn(ref cause) => cause,
+            ListManagedSchemaArnsError::InvalidNextToken(ref cause) => cause,
+            ListManagedSchemaArnsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
 /// Errors returned by ListObjectAttributes
 #[derive(Debug, PartialEq)]
 pub enum ListObjectAttributesError {
@@ -6167,7 +6324,7 @@ pub enum ListObjectAttributesError {
     InvalidArn(String),
     /// Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6258,7 +6415,7 @@ pub enum ListObjectChildrenError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when any invalid operations are performed on an object that is not a node, such as calling ListObjectChildren for a leaf node object.

NotNode(String), @@ -6343,7 +6500,7 @@ pub enum ListObjectParentPathsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6430,7 +6587,7 @@ pub enum ListObjectParentsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6513,7 +6670,7 @@ pub enum ListObjectPoliciesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6594,7 +6751,7 @@ pub enum ListOutgoingTypedLinksError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6687,7 +6844,7 @@ pub enum ListPolicyAttachmentsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that the requested operation can only operate on policy objects.

NotPolicy(String), @@ -6776,7 +6933,7 @@ pub enum ListPublishedSchemaArnsError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6857,7 +7014,7 @@ pub enum ListTagsForResourceError { InvalidArn(String), ///

Can occur for multiple reasons, such as tagging a resource that doesn’t exist or specifying more tags for a resource than the allowed limit. The allowed limit is 50 tags per resource.

InvalidTaggingRequest(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -6934,7 +7091,7 @@ pub enum ListTypedLinkFacetAttributesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7025,7 +7182,7 @@ pub enum ListTypedLinkFacetNamesError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7108,7 +7265,7 @@ pub enum LookupPolicyError { InvalidArn(String), ///

Indicates that the NextToken value is not valid.

InvalidNextToken(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7179,7 +7336,7 @@ pub enum PublishSchemaError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7254,7 +7411,7 @@ pub enum PutSchemaFromJsonError { InvalidRule(String), ///

Indicates that the provided SchemaDoc value is not valid.

InvalidSchemaDoc(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

RetryableConflict(String), @@ -7323,7 +7480,7 @@ pub enum RemoveFacetFromObjectError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7406,7 +7563,7 @@ pub enum TagResourceError { InvalidArn(String), ///

Can occur for multiple reasons, such as tagging a resource that doesn’t exist or specifying more tags for a resource than the allowed limit. The allowed limit is 50 tags per resource.

InvalidTaggingRequest(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7475,7 +7632,7 @@ pub enum UntagResourceError { InvalidArn(String), ///

Can occur for multiple reasons, such as tagging a resource that doesn’t exist or specifying more tags for a resource than the allowed limit. The allowed limit is 50 tags per resource.

InvalidTaggingRequest(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7540,6 +7697,8 @@ pub enum UpdateFacetError { AccessDenied(String), ///

The specified Facet could not be found.

FacetNotFound(String), + ///

The Facet that you provided was not well formed or could not be validated with the schema.

+ FacetValidation(String), ///

Indicates a problem that must be resolved by Amazon Web Services. This might be a transient error in which case you can retry your request until it succeeds. Otherwise, go to the AWS Service Health Dashboard site to see if there are any operational issues with the service.

InternalService(String), ///

Indicates that the provided ARN value is not valid.

@@ -7548,7 +7707,7 @@ pub enum UpdateFacetError { InvalidFacetUpdate(String), ///

Occurs when any of the rule parameter keys or values are invalid.

InvalidRule(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7566,6 +7725,9 @@ impl UpdateFacetError { "FacetNotFoundException" => { return RusotoError::Service(UpdateFacetError::FacetNotFound(err.msg)) } + "FacetValidationException" => { + return RusotoError::Service(UpdateFacetError::FacetValidation(err.msg)) + } "InternalServiceException" => { return RusotoError::Service(UpdateFacetError::InternalService(err.msg)) } @@ -7604,6 +7766,7 @@ impl Error for UpdateFacetError { match *self { UpdateFacetError::AccessDenied(ref cause) => cause, UpdateFacetError::FacetNotFound(ref cause) => cause, + UpdateFacetError::FacetValidation(ref cause) => cause, UpdateFacetError::InternalService(ref cause) => cause, UpdateFacetError::InvalidArn(ref cause) => cause, UpdateFacetError::InvalidFacetUpdate(ref cause) => cause, @@ -7627,7 +7790,7 @@ pub enum UpdateLinkAttributesError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

ResourceNotFound(String), @@ -7712,7 +7875,7 @@ pub enum UpdateObjectAttributesError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

LinkNameAlreadyInUse(String), @@ -7803,7 +7966,7 @@ pub enum UpdateSchemaError { InternalService(String), ///

Indicates that the provided ARN value is not valid.

InvalidArn(String), - ///

Indicates that limits are exceeded. See Limits for more information.

+ ///

Indicates that limits are exceeded. See Limits for more information.

LimitExceeded(String), ///

The specified resource could not be found.

    ResourceNotFound(String),
@@ -7874,7 +8037,7 @@ pub enum UpdateTypedLinkFacetError {
    InvalidFacetUpdate(String),
    /// Occurs when any of the rule parameter keys or values are invalid.
    InvalidRule(String),
-    /// Indicates that limits are exceeded. See Limits for more information.
+    /// Indicates that limits are exceeded. See Limits for more information.
    LimitExceeded(String),
    /// The specified resource could not be found.
    ResourceNotFound(String),
@@ -7965,12 +8128,14 @@ pub enum UpgradeAppliedSchemaError {
    InternalService(String),
    /// Indicates that the provided ARN value is not valid.
    InvalidArn(String),
-    /// Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.
+    /// Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.
    InvalidAttachment(String),
    /// The specified resource could not be found.
    ResourceNotFound(String),
    /// Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.
    RetryableConflict(String),
+    /// Indicates that a schema could not be created due to a naming conflict. Please select a different name and then try again.
+    SchemaAlreadyExists(String),
}

impl UpgradeAppliedSchemaError {
@@ -8008,6 +8173,11 @@ impl UpgradeAppliedSchemaError {
                    err.msg,
                ))
            }
+            "SchemaAlreadyExistsException" => {
+                return RusotoError::Service(UpgradeAppliedSchemaError::SchemaAlreadyExists(
+                    err.msg,
+                ))
+            }
            "ValidationException" => return RusotoError::Validation(err.msg),
            _ => {}
        }
@@ -8030,6 +8200,7 @@ impl Error for UpgradeAppliedSchemaError {
            UpgradeAppliedSchemaError::InvalidAttachment(ref cause) => cause,
            UpgradeAppliedSchemaError::ResourceNotFound(ref cause) => cause,
            UpgradeAppliedSchemaError::RetryableConflict(ref cause) => cause,
+            UpgradeAppliedSchemaError::SchemaAlreadyExists(ref cause) => cause,
        }
    }
}

@@ -8044,9 +8215,9 @@ pub enum UpgradePublishedSchemaError {
    InternalService(String),
    /// Indicates that the provided ARN value is not valid.
    InvalidArn(String),
-    /// Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.
+    /// Indicates that an attempt to make an attachment was invalid. For example, attaching two nodes with a link type that is not applicable to the nodes or attempting to apply a schema to a directory a second time.
    InvalidAttachment(String),
-    /// Indicates that limits are exceeded. See Limits for more information.
+    /// Indicates that limits are exceeded. See Limits for more information.
    LimitExceeded(String),
    /// The specified resource could not be found.
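For callers, the practical effect of the new variants is that these service exceptions no longer fall through to the generic catch-all. A minimal sketch of matching on one of them, assuming a configured client and a prepared request (hypothetical `client` and `req`), using the blocking `.sync()` call seen in the crate's own tests:

```rust
use rusoto_clouddirectory::{
    CloudDirectory, CloudDirectoryClient, UpgradeAppliedSchemaError, UpgradeAppliedSchemaRequest,
};
use rusoto_core::RusotoError;

fn upgrade(client: &CloudDirectoryClient, req: UpgradeAppliedSchemaRequest) {
    match client.upgrade_applied_schema(req).sync() {
        Ok(out) => println!("upgraded: {:?}", out),
        // New in this diff: SchemaAlreadyExistsException now maps to its own variant.
        Err(RusotoError::Service(UpgradeAppliedSchemaError::SchemaAlreadyExists(msg))) => {
            eprintln!("schema name already in use: {}", msg)
        }
        Err(e) => eprintln!("upgrade_applied_schema failed: {}", e),
    }
}
```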
    ResourceNotFound(String),
@@ -8152,7 +8323,7 @@ pub trait CloudDirectory {
        input: AttachToIndexRequest,
    ) -> RusotoFuture<AttachToIndexResponse, AttachToIndexError>;

-    /// Attaches a typed link to a specified source and target object. For more information, see Typed link.
+    /// Attaches a typed link to a specified source and target object. For more information, see Typed Links.
    fn attach_typed_link(
        &self,
        input: AttachTypedLinkRequest,
@@ -8170,7 +8341,7 @@ pub trait CloudDirectory {
        input: BatchWriteRequest,
    ) -> RusotoFuture<BatchWriteResponse, BatchWriteError>;

-    /// Creates a Directory by copying the published schema into the directory. A directory cannot be created without a schema.
+    /// Creates a Directory by copying the published schema into the directory. A directory cannot be created without a schema. You can also quickly create a directory using a managed schema, called the QuickStartSchema. For more information, see Managed Schema in the Amazon Cloud Directory Developer Guide.
    fn create_directory(
        &self,
        input: CreateDirectoryRequest,
@@ -8182,7 +8353,7 @@ pub trait CloudDirectory {
        input: CreateFacetRequest,
    ) -> RusotoFuture<CreateFacetResponse, CreateFacetError>;

-    /// Creates an index object. See Indexing for more information.
+    /// Creates an index object. See Indexing and search for more information.
    fn create_index(
        &self,
        input: CreateIndexRequest,
@@ -8200,7 +8371,7 @@ pub trait CloudDirectory {
        input: CreateSchemaRequest,
    ) -> RusotoFuture<CreateSchemaResponse, CreateSchemaError>;

-    /// Creates a TypedLinkFacet. For more information, see Typed link.
+    /// Creates a TypedLinkFacet. For more information, see Typed Links.
    fn create_typed_link_facet(
        &self,
        input: CreateTypedLinkFacetRequest,
@@ -8218,7 +8389,7 @@ pub trait CloudDirectory {
        input: DeleteFacetRequest,
    ) -> RusotoFuture<DeleteFacetResponse, DeleteFacetError>;

-    /// Deletes an object and its associated attributes. Only objects with no children and no parents can be deleted.
+    /// Deletes an object and its associated attributes. Only objects with no children and no parents can be deleted. The maximum number of attributes that can be deleted during an object deletion is 30. For more information, see Amazon Cloud Directory Limits.
    fn delete_object(
        &self,
        input: DeleteObjectRequest,
@@ -8230,7 +8401,7 @@ pub trait CloudDirectory {
        input: DeleteSchemaRequest,
    ) -> RusotoFuture<DeleteSchemaResponse, DeleteSchemaError>;

-    /// Deletes a TypedLinkFacet. For more information, see Typed link.
+    /// Deletes a TypedLinkFacet. For more information, see Typed Links.
    fn delete_typed_link_facet(
        &self,
        input: DeleteTypedLinkFacetRequest,
@@ -8254,7 +8425,7 @@ pub trait CloudDirectory {
        input: DetachPolicyRequest,
    ) -> RusotoFuture<DetachPolicyResponse, DetachPolicyError>;

-    /// Detaches a typed link from a specified source and target object. For more information, see Typed link.
+    /// Detaches a typed link from a specified source and target object. For more information, see Typed Links.
    fn detach_typed_link(
        &self,
        input: DetachTypedLinkRequest,
@@ -8305,13 +8476,13 @@ pub trait CloudDirectory {
        input: GetObjectInformationRequest,
    ) -> RusotoFuture<GetObjectInformationResponse, GetObjectInformationError>;

-    /// Retrieves a JSON representation of the schema. See JSON Schema Format for more information.
+    /// Retrieves a JSON representation of the schema. See JSON Schema Format for more information.
    fn get_schema_as_json(
        &self,
        input: GetSchemaAsJsonRequest,
    ) -> RusotoFuture<GetSchemaAsJsonResponse, GetSchemaAsJsonError>;

-    /// Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed link.
+    /// Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed Links.
    fn get_typed_link_facet_information(
        &self,
        input: GetTypedLinkFacetInformationRequest,
@@ -8353,7 +8524,7 @@ pub trait CloudDirectory {
        input: ListFacetNamesRequest,
    ) -> RusotoFuture<ListFacetNamesResponse, ListFacetNamesError>;

-    /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.
+    /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
    fn list_incoming_typed_links(
        &self,
        input: ListIncomingTypedLinksRequest,
@@ -8365,6 +8536,12 @@ pub trait CloudDirectory {
        input: ListIndexRequest,
    ) -> RusotoFuture<ListIndexResponse, ListIndexError>;

+    /// Lists the major version families of each managed schema. If a major version ARN is provided as SchemaArn, the minor version revisions in that family are listed instead.
+    fn list_managed_schema_arns(
+        &self,
+        input: ListManagedSchemaArnsRequest,
+    ) -> RusotoFuture<ListManagedSchemaArnsResponse, ListManagedSchemaArnsError>;
+
    /// Lists all attributes that are associated with an object.
    fn list_object_attributes(
        &self,
@@ -8377,7 +8554,7 @@ pub trait CloudDirectory {
        input: ListObjectChildrenRequest,
    ) -> RusotoFuture<ListObjectChildrenResponse, ListObjectChildrenError>;

-    /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.
+    /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.
    fn list_object_parent_paths(
        &self,
        input: ListObjectParentPathsRequest,
@@ -8395,7 +8572,7 @@ pub trait CloudDirectory {
        input: ListObjectPoliciesRequest,
    ) -> RusotoFuture<ListObjectPoliciesResponse, ListObjectPoliciesError>;

-    /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.
+    /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
    fn list_outgoing_typed_links(
        &self,
        input: ListOutgoingTypedLinksRequest,
@@ -8419,19 +8596,19 @@ pub trait CloudDirectory {
        input: ListTagsForResourceRequest,
    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError>;

-    /// Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed link.
+    /// Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed Links.
    fn list_typed_link_facet_attributes(
        &self,
        input: ListTypedLinkFacetAttributesRequest,
    ) -> RusotoFuture<ListTypedLinkFacetAttributesResponse, ListTypedLinkFacetAttributesError>;

-    /// Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed link.
+    /// Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed Links.
    fn list_typed_link_facet_names(
        &self,
        input: ListTypedLinkFacetNamesRequest,
    ) -> RusotoFuture<ListTypedLinkFacetNamesResponse, ListTypedLinkFacetNamesError>;

-    /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
+    /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
    fn lookup_policy(
        &self,
        input: LookupPolicyRequest,
@@ -8443,7 +8620,7 @@ pub trait CloudDirectory {
        input: PublishSchemaRequest,
    ) -> RusotoFuture<PublishSchemaResponse, PublishSchemaError>;

-    /// Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.
+    /// Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.
    fn put_schema_from_json(
        &self,
        input: PutSchemaFromJsonRequest,
@@ -8491,7 +8668,7 @@ pub trait CloudDirectory {
        input: UpdateSchemaRequest,
    ) -> RusotoFuture<UpdateSchemaResponse, UpdateSchemaError>;

-    /// Updates a TypedLinkFacet. For more information, see Typed link.
+    /// Updates a TypedLinkFacet. For more information, see Typed Links.
    fn update_typed_link_facet(
        &self,
        input: UpdateTypedLinkFacetRequest,
@@ -8521,10 +8698,7 @@ impl CloudDirectoryClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> CloudDirectoryClient {
-        CloudDirectoryClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with(
@@ -8538,10 +8712,14 @@ impl CloudDirectoryClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        CloudDirectoryClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> CloudDirectoryClient {
+        CloudDirectoryClient { client, region }
    }
}

@@ -8711,7 +8889,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }
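The refactor above routes `new` and `new_with` through the newly public `new_with_client`, which lets callers inject a pre-built `Client`. A minimal sketch of that injection (region choice arbitrary):

```rust
use rusoto_clouddirectory::CloudDirectoryClient;
use rusoto_core::{Client, Region};

fn main() {
    // Reuses the process-wide shared Client, mirroring what `new` now does
    // internally; any hand-assembled Client could be passed instead.
    let client = CloudDirectoryClient::new_with_client(Client::shared(), Region::UsEast1);
    let _ = client;
}
```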
-    /// Attaches a typed link to a specified source and target object. For more information, see Typed link.
+    /// Attaches a typed link to a specified source and target object. For more information, see Typed Links.
    fn attach_typed_link(
        &self,
        input: AttachTypedLinkRequest,
@@ -8814,7 +8992,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Creates a Directory by copying the published schema into the directory. A directory cannot be created without a schema.
+    /// Creates a Directory by copying the published schema into the directory. A directory cannot be created without a schema. You can also quickly create a directory using a managed schema, called the QuickStartSchema. For more information, see Managed Schema in the Amazon Cloud Directory Developer Guide.
    fn create_directory(
        &self,
        input: CreateDirectoryRequest,
@@ -8880,7 +9058,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Creates an index object. See Indexing for more information.
+    /// Creates an index object. See Indexing and search for more information.
    fn create_index(
        &self,
        input: CreateIndexRequest,
@@ -8978,7 +9156,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Creates a TypedLinkFacet. For more information, see Typed link.
+    /// Creates a TypedLinkFacet. For more information, see Typed Links.
    fn create_typed_link_facet(
        &self,
        input: CreateTypedLinkFacetRequest,
@@ -9074,7 +9252,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Deletes an object and its associated attributes. Only objects with no children and no parents can be deleted.
+    /// Deletes an object and its associated attributes. Only objects with no children and no parents can be deleted. The maximum number of attributes that can be deleted during an object deletion is 30. For more information, see Amazon Cloud Directory Limits.
    fn delete_object(
        &self,
        input: DeleteObjectRequest,
@@ -9138,7 +9316,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Deletes a TypedLinkFacet. For more information, see Typed link.
+    /// Deletes a TypedLinkFacet. For more information, see Typed Links.
    fn delete_typed_link_facet(
        &self,
        input: DeleteTypedLinkFacetRequest,
@@ -9269,7 +9447,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Detaches a typed link from a specified source and target object. For more information, see Typed link.
+    /// Detaches a typed link from a specified source and target object. For more information, see Typed Links.
    fn detach_typed_link(
        &self,
        input: DetachTypedLinkRequest,
@@ -9558,7 +9736,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Retrieves a JSON representation of the schema. See JSON Schema Format for more information.
+    /// Retrieves a JSON representation of the schema. See JSON Schema Format for more information.
    fn get_schema_as_json(
        &self,
        input: GetSchemaAsJsonRequest,
@@ -9589,7 +9767,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed link.
+    /// Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed Links.
    fn get_typed_link_facet_information(
        &self,
        input: GetTypedLinkFacetInformationRequest,
@@ -9812,7 +9990,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.
+    /// Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
    fn list_incoming_typed_links(
        &self,
        input: ListIncomingTypedLinksRequest,
@@ -9881,6 +10059,37 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

+    /// Lists the major version families of each managed schema. If a major version ARN is provided as SchemaArn, the minor version revisions in that family are listed instead.
+    fn list_managed_schema_arns(
+        &self,
+        input: ListManagedSchemaArnsRequest,
+    ) -> RusotoFuture<ListManagedSchemaArnsResponse, ListManagedSchemaArnsError> {
+        let request_uri = "/amazonclouddirectory/2017-01-11/schema/managed";
+
+        let mut request = SignedRequest::new("POST", "clouddirectory", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.as_u16() == 200 {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListManagedSchemaArnsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListManagedSchemaArnsError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
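A hedged usage sketch for the operation added above; the request struct's `Default` impl and the `schema_arns` field on the response are assumptions based on the usual rusoto codegen pattern, not shown in this diff:

```rust
use rusoto_clouddirectory::{CloudDirectory, CloudDirectoryClient, ListManagedSchemaArnsRequest};
use rusoto_core::Region;

fn main() {
    let client = CloudDirectoryClient::new(Region::UsEast1);
    // Leaving schema_arn unset lists the major version families of all managed schemas.
    let req = ListManagedSchemaArnsRequest::default();
    match client.list_managed_schema_arns(req).sync() {
        // `schema_arns` is the assumed snake_case field for the API's SchemaArns list.
        Ok(out) => println!("managed schemas: {:?}", out.schema_arns),
        Err(e) => eprintln!("list_managed_schema_arns failed: {}", e),
    }
}
```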
    /// Lists all attributes that are associated with an object.
    fn list_object_attributes(
        &self,
@@ -9954,7 +10163,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.
+    /// Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure. Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.
    fn list_object_parent_paths(
        &self,
        input: ListObjectParentPathsRequest,
@@ -10060,7 +10269,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.
+    /// Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed Links.
    fn list_outgoing_typed_links(
        &self,
        input: ListOutgoingTypedLinksRequest,
@@ -10188,7 +10397,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed link.
+    /// Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed Links.
    fn list_typed_link_facet_attributes(
        &self,
        input: ListTypedLinkFacetAttributesRequest,
@@ -10218,7 +10427,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed link.
+    /// Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed Links.
    fn list_typed_link_facet_names(
        &self,
        input: ListTypedLinkFacetNamesRequest,
@@ -10248,7 +10457,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
+    /// Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
    fn lookup_policy(
        &self,
        input: LookupPolicyRequest,
@@ -10314,7 +10523,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.
+    /// Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.
    fn put_schema_from_json(
        &self,
        input: PutSchemaFromJsonRequest,
@@ -10573,7 +10782,7 @@ impl CloudDirectory for CloudDirectoryClient {
        })
    }

-    /// Updates a TypedLinkFacet. For more information, see Typed link.
+    /// Updates a TypedLinkFacet. For more information, see Typed Links.
    fn update_typed_link_facet(
        &self,
        input: UpdateTypedLinkFacetRequest,
diff --git a/rusoto/services/clouddirectory/src/lib.rs b/rusoto/services/clouddirectory/src/lib.rs
index 55dc124902b..bc92129719c 100644
--- a/rusoto/services/clouddirectory/src/lib.rs
+++ b/rusoto/services/clouddirectory/src/lib.rs
@@ -12,7 +12,7 @@
// =================================================================
#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//! Amazon Cloud Directory: Amazon Cloud Directory is a component of the AWS Directory Service that simplifies the development and management of cloud-scale web, mobile, and IoT applications. This guide describes the Cloud Directory operations that you can call programmatically and includes detailed information on data types and errors. For information about AWS Directory Services features, see AWS Directory Service and the AWS Directory Service Administration Guide.
+//! Amazon Cloud Directory: Amazon Cloud Directory is a component of the AWS Directory Service that simplifies the development and management of cloud-scale web, mobile, and IoT applications. This guide describes the Cloud Directory operations that you can call programmatically and includes detailed information on data types and errors. For information about Cloud Directory features, see AWS Directory Service and the Amazon Cloud Directory Developer Guide.
//! //! If you're using the service, you're probably looking for [CloudDirectoryClient](struct.CloudDirectoryClient.html) and [CloudDirectory](trait.CloudDirectory.html). diff --git a/rusoto/services/cloudformation/Cargo.toml b/rusoto/services/cloudformation/Cargo.toml index f47c7aecbc2..c07267d0f3f 100644 --- a/rusoto/services/cloudformation/Cargo.toml +++ b/rusoto/services/cloudformation/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudformation" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudformation/README.md b/rusoto/services/cloudformation/README.md index 3a3091c0aa2..4a368b80f9b 100644 --- a/rusoto/services/cloudformation/README.md +++ b/rusoto/services/cloudformation/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudformation` in your application, add it as a dependency in yo ```toml [dependencies] -rusoto_cloudformation = "0.40.0" +rusoto_cloudformation = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
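The `serialize_structs` feature introduced in the Cargo.toml and README hunks above gates a `derive(Serialize)` on output structs. A sketch of what that enables for a consumer (assumes `serde_json` in the consumer's own dependencies; `ListStacksOutput` is one such generated output struct):

```rust
// Enabled in the consumer's Cargo.toml with something like:
//   rusoto_cloudformation = { version = "0.41.0", features = ["serialize_structs"] }
use rusoto_cloudformation::ListStacksOutput;

fn to_json(output: &ListStacksOutput) -> String {
    // Compiles only when `serialize_structs` (or a test build) activates the
    // Serialize derive that this release adds behind the feature gate.
    serde_json::to_string_pretty(output).unwrap()
}
```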
diff --git a/rusoto/services/cloudformation/src/custom/custom_tests.rs b/rusoto/services/cloudformation/src/custom/custom_tests.rs index f5b3ca547e0..98ec70b5096 100644 --- a/rusoto/services/cloudformation/src/custom/custom_tests.rs +++ b/rusoto/services/cloudformation/src/custom/custom_tests.rs @@ -2,17 +2,18 @@ extern crate rusoto_mock; use crate::generated::{CloudFormation, CloudFormationClient, ListStacksInput}; -use rusoto_core::Region; +use self::rusoto_mock::*; +use rusoto_core::param::Params; use rusoto_core::signature::SignedRequest; use rusoto_core::signature::SignedRequestPayload; -use rusoto_core::param::Params; +use rusoto_core::Region; use serde_urlencoded; -use self::rusoto_mock::*; #[test] fn should_serialize_list_parameters_in_request_body() { let mock = MockRequestDispatcher::with_status(200) - .with_body(r#" + .with_body( + r#" @@ -44,23 +45,34 @@ fn should_serialize_list_parameters_in_request_body() { b9b4b068-3a41-11e5-94eb-example - "#) + "#, + ) .with_request_checker(|request: &SignedRequest| { assert_eq!("POST", request.method); assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = serde_urlencoded::from_bytes(buffer).unwrap(); - assert_eq!(Some(&Some("CREATE_IN_PROGRESS".to_owned())), - params.get("StackStatusFilter.member.1")); - assert_eq!(Some(&Some("DELETE_COMPLETE".to_owned())), - params.get("StackStatusFilter.member.2")); + assert_eq!( + Some(&Some("CREATE_IN_PROGRESS".to_owned())), + params.get("StackStatusFilter.member.1") + ); + assert_eq!( + Some(&Some("DELETE_COMPLETE".to_owned())), + params.get("StackStatusFilter.member.2") + ); } else { panic!("Unexpected request.payload: {:?}", request.payload); } }); - let filters = vec!["CREATE_IN_PROGRESS".to_owned(), "DELETE_COMPLETE".to_owned()]; - let request = ListStacksInput { stack_status_filter: Some(filters), ..Default::default() }; + let filters = vec![ + "CREATE_IN_PROGRESS".to_owned(), + "DELETE_COMPLETE".to_owned(), + ]; + let request = ListStacksInput { + stack_status_filter: Some(filters), + ..Default::default() + }; let client = CloudFormationClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let _result = client.list_stacks(request).sync().unwrap(); diff --git a/rusoto/services/cloudformation/src/custom/mod.rs b/rusoto/services/cloudformation/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/cloudformation/src/custom/mod.rs +++ b/rusoto/services/cloudformation/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/cloudformation/src/generated.rs b/rusoto/services/cloudformation/src/generated.rs index a6dd04a2611..a0dc8da7631 100644 --- a/rusoto/services/cloudformation/src/generated.rs +++ b/rusoto/services/cloudformation/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -9803,10 +9802,7 @@ impl CloudFormationClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudFormationClient { - CloudFormationClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -9820,10 +9816,14 @@ impl CloudFormationClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudFormationClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudFormationClient { + CloudFormationClient { client, region } } } @@ -9885,7 +9885,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9936,7 +9936,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9987,7 +9987,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10037,7 +10037,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10088,7 +10088,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10139,7 +10139,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10214,7 +10214,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10265,7 +10265,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10313,7 +10313,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10364,7 +10364,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10415,7 +10415,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10465,7 +10465,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10513,7 +10513,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10561,7 +10561,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10609,7 +10609,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10657,7 +10657,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10708,7 +10708,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next(); @@ -10756,7 +10756,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10807,7 +10807,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10858,7 +10858,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10906,7 +10906,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10956,7 +10956,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11007,7 +11007,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11058,7 +11058,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11109,7 +11109,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11160,7 +11160,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11211,7 +11211,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11262,7 +11262,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11313,7 +11313,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11364,7 +11364,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11415,7 +11415,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11463,7 +11463,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11511,7 +11511,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11562,7 +11562,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11613,7 +11613,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11712,7 +11712,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11763,7 +11763,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11813,7 +11813,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11864,7 +11864,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11912,7 +11912,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11963,7 +11963,7 @@ impl CloudFormation for CloudFormationClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/cloudfront/Cargo.toml b/rusoto/services/cloudfront/Cargo.toml index 22640e496b7..6d576d4021a 100644 --- a/rusoto/services/cloudfront/Cargo.toml +++ b/rusoto/services/cloudfront/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudfront" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,17 +17,19 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -xml-rs = "0.7" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudfront/README.md b/rusoto/services/cloudfront/README.md index 47d911eb816..ec8005f9c97 100644 --- a/rusoto/services/cloudfront/README.md +++ b/rusoto/services/cloudfront/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudfront` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_cloudfront = "0.40.0" +rusoto_cloudfront = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
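Every generated client in this release flips its XML reader from `trim_whitespace(true)` to `trim_whitespace(false)`, as the repeated hunks above show. An illustrative xml-rs sketch (not rusoto code) of why that matters for string fields whose whitespace is significant:

```rust
use xml::reader::{EventReader, ParserConfig, XmlEvent};

fn main() {
    let body: &[u8] = b"<Value>  padded  </Value>";
    // With trim_whitespace(true) the text node would arrive as "padded",
    // silently dropping the leading and trailing spaces of the field value.
    let reader = EventReader::new_with_config(body, ParserConfig::new().trim_whitespace(false));
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            println!("{:?}", text); // prints "  padded  " with spaces intact
        }
    }
}
```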
diff --git a/rusoto/services/cloudfront/src/custom/custom_tests.rs b/rusoto/services/cloudfront/src/custom/custom_tests.rs index a45d3322c3f..3ead5ddfe1d 100644 --- a/rusoto/services/cloudfront/src/custom/custom_tests.rs +++ b/rusoto/services/cloudfront/src/custom/custom_tests.rs @@ -2,12 +2,15 @@ extern crate rusoto_mock; use crate::generated::{CloudFront, CloudFrontClient, ListDistributionsRequest}; -use rusoto_core::Region; use self::rusoto_mock::*; +use rusoto_core::Region; #[test] fn should_list_distributions() { - let body = MockResponseReader::read_response("test_resources/generated/valid", "cloudfront-list-distributions.xml"); + let body = MockResponseReader::read_response( + "test_resources/generated/valid", + "cloudfront-list-distributions.xml", + ); let mock = MockRequestDispatcher::with_status(200).with_body(&body); let request = ListDistributionsRequest::default(); @@ -24,5 +27,9 @@ fn should_list_distributions() { assert_eq!(first_item.status, "Deployed"); assert_eq!(first_item.domain_name, "d111111abcdef8.cloudfront.net"); assert_eq!(first_item.origins.quantity, 2); - assert!(first_item.origins.items.iter().any(|x| x.domain_name == "example.com")); + assert!(first_item + .origins + .items + .iter() + .any(|x| x.domain_name == "example.com")); } diff --git a/rusoto/services/cloudfront/src/custom/mod.rs b/rusoto/services/cloudfront/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/cloudfront/src/custom/mod.rs +++ b/rusoto/services/cloudfront/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/cloudfront/src/generated.rs b/rusoto/services/cloudfront/src/generated.rs index 17474f17605..473ecbd12b6 100644 --- a/rusoto/services/cloudfront/src/generated.rs +++ b/rusoto/services/cloudfront/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -12093,10 +12092,7 @@ impl CloudFrontClient { /// /// The client will use the default credentials provider and tls client. 
pub fn new(region: region::Region) -> CloudFrontClient { - CloudFrontClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -12110,10 +12106,14 @@ impl CloudFrontClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudFrontClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudFrontClient { + CloudFrontClient { client, region } } } @@ -12156,7 +12156,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12215,7 +12215,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12274,7 +12274,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12333,7 +12333,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12392,7 +12392,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12454,7 +12454,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12509,7 +12509,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12565,7 +12565,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12629,7 +12629,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut 
stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12851,7 +12851,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12903,7 +12903,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12951,7 +12951,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12995,7 +12995,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13039,7 +13039,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13086,7 +13086,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13134,7 +13134,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13186,7 +13186,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13237,7 +13237,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13281,7 +13281,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13326,7 +13326,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( 
response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13370,7 +13370,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13418,7 +13418,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13476,7 +13476,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13529,7 +13529,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13582,7 +13582,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13635,7 +13635,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13688,7 +13688,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13744,7 +13744,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13797,7 +13797,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13847,7 +13847,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next(); @@ -13894,7 +13894,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14013,7 +14013,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14072,7 +14072,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14134,7 +14134,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14196,7 +14196,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14255,7 +14255,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14314,7 +14314,7 @@ impl CloudFront for CloudFrontClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/cloudhsm/Cargo.toml b/rusoto/services/cloudhsm/Cargo.toml index e7e34575981..b978809245a 100644 --- a/rusoto/services/cloudhsm/Cargo.toml +++ b/rusoto/services/cloudhsm/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudhsm" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudhsm/README.md b/rusoto/services/cloudhsm/README.md index dd3b915e274..8bbf80221f0 100644 --- a/rusoto/services/cloudhsm/README.md +++ b/rusoto/services/cloudhsm/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudhsm` in your application, add it as a dependency in your `Ca ```toml 
[dependencies] -rusoto_cloudhsm = "0.40.0" +rusoto_cloudhsm = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cloudhsm/src/custom/mod.rs b/rusoto/services/cloudhsm/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloudhsm/src/custom/mod.rs +++ b/rusoto/services/cloudhsm/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloudhsm/src/generated.rs b/rusoto/services/cloudhsm/src/generated.rs index eb670b5353b..a69be957f9d 100644 --- a/rusoto/services/cloudhsm/src/generated.rs +++ b/rusoto/services/cloudhsm/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -35,7 +34,7 @@ pub struct AddTagsToResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsToResourceResponse { ///

<p>The status of the operation.</p>
#[serde(rename = "Status")] @@ -52,7 +51,7 @@ pub struct CreateHapgRequest { ///

<p>Contains the output of the CreateHAPartitionGroup action.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHapgResponse { ///

<p>The ARN of the high-availability partition group.</p>
#[serde(rename = "HapgArn")] @@ -94,7 +93,7 @@ pub struct CreateHsmRequest { ///

<p>Contains the output of the CreateHsm operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHsmResponse { ///

<p>The ARN of the HSM.</p>
#[serde(rename = "HsmArn")] @@ -116,7 +115,7 @@ pub struct CreateLunaClientRequest { ///

<p>Contains the output of the CreateLunaClient action.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLunaClientResponse { ///

<p>The ARN of the client.</p>
#[serde(rename = "ClientArn")] @@ -134,7 +133,7 @@ pub struct DeleteHapgRequest { ///

<p>Contains the output of the DeleteHapg action.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteHapgResponse { ///

<p>The status of the action.</p>
#[serde(rename = "Status")] @@ -151,7 +150,7 @@ pub struct DeleteHsmRequest { ///

<p>Contains the output of the DeleteHsm operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteHsmResponse { ///

<p>The status of the operation.</p>
#[serde(rename = "Status")] @@ -166,7 +165,7 @@ pub struct DeleteLunaClientRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLunaClientResponse { ///

<p>The status of the action.</p>
#[serde(rename = "Status")] @@ -183,7 +182,7 @@ pub struct DescribeHapgRequest { ///

<p>Contains the output of the DescribeHapg action.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeHapgResponse { ///

<p>The ARN of the high-availability partition group.</p>
#[serde(rename = "HapgArn")] @@ -238,7 +237,7 @@ pub struct DescribeHsmRequest { ///

<p>Contains the output of the DescribeHsm operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeHsmResponse { ///

<p>The Availability Zone that the HSM is in.</p>
#[serde(rename = "AvailabilityZone")] @@ -338,7 +337,7 @@ pub struct DescribeLunaClientRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLunaClientResponse { ///

<p>The certificate installed on the HSMs used by this client.</p>
#[serde(rename = "Certificate")] @@ -376,7 +375,7 @@ pub struct GetConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConfigResponse { ///

<p>The certificate file containing the server.pem files of the HSMs.</p>
#[serde(rename = "ConfigCred")] @@ -397,7 +396,7 @@ pub struct GetConfigResponse { pub struct ListAvailableZonesRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAvailableZonesResponse { ///

<p>The list of Availability Zones that have available AWS CloudHSM capacity.</p>
#[serde(rename = "AZList")] @@ -414,7 +413,7 @@ pub struct ListHapgsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHapgsResponse { ///

<p>The list of high-availability partition groups.</p>
#[serde(rename = "HapgList")] @@ -435,7 +434,7 @@ pub struct ListHsmsRequest { ///

<p>Contains the output of the ListHsms operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHsmsResponse { ///

<p>The list of ARNs that identify the HSMs.</p>
#[serde(rename = "HsmList")] @@ -456,7 +455,7 @@ pub struct ListLunaClientsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLunaClientsResponse { ///

<p>The list of clients.</p>
#[serde(rename = "ClientList")] @@ -475,7 +474,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

<p>One or more tags.</p>
#[serde(rename = "TagList")] @@ -498,7 +497,7 @@ pub struct ModifyHapgRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyHapgResponse { ///

<p>The ARN of the high-availability partition group.</p>
#[serde(rename = "HapgArn")] @@ -536,7 +535,7 @@ pub struct ModifyHsmRequest { ///

<p>Contains the output of the ModifyHsm operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyHsmResponse { ///

<p>The ARN of the HSM.</p>
#[serde(rename = "HsmArn")] @@ -555,7 +554,7 @@ pub struct ModifyLunaClientRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyLunaClientResponse { ///

<p>The ARN of the client.</p>
#[serde(rename = "ClientArn")] @@ -574,7 +573,7 @@ pub struct RemoveTagsFromResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsFromResourceResponse { ///

<p>The status of the operation.</p>
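// The repeated `cfg_attr` change in this file gates `derive(Serialize)` on
// `any(test, feature = "serialize_structs")` instead of `test` alone, so downstream
// crates can opt in to serializing these response structs. A minimal sketch of what
// the feature enables, assuming `serialize_structs` is turned on for `rusoto_cloudhsm`
// and the calling crate depends on `serde_json`:

use rusoto_cloudhsm::ListHsmsResponse;

fn main() {
    // Every field is an `Option` marked `skip_serializing_if = "Option::is_none"`,
    // so a default (empty) response serializes to "{}".
    let resp = ListHsmsResponse::default();
    println!("{}", serde_json::to_string(&resp).unwrap());
}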
#[serde(rename = "Status")] @@ -1618,10 +1617,7 @@ impl CloudHsmClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudHsmClient { - CloudHsmClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1635,10 +1631,14 @@ impl CloudHsmClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudHsmClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudHsmClient { + CloudHsmClient { client, region } } } diff --git a/rusoto/services/cloudhsmv2/Cargo.toml b/rusoto/services/cloudhsmv2/Cargo.toml index 7264ff162a0..dffa6211f83 100644 --- a/rusoto/services/cloudhsmv2/Cargo.toml +++ b/rusoto/services/cloudhsmv2/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudhsmv2" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudhsmv2/README.md b/rusoto/services/cloudhsmv2/README.md index 56a58d4cf7a..28cfcb7aef9 100644 --- a/rusoto/services/cloudhsmv2/README.md +++ b/rusoto/services/cloudhsmv2/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudhsmv2` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_cloudhsmv2 = "0.40.0" +rusoto_cloudhsmv2 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cloudhsmv2/src/custom/mod.rs b/rusoto/services/cloudhsmv2/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloudhsmv2/src/custom/mod.rs +++ b/rusoto/services/cloudhsmv2/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloudhsmv2/src/generated.rs b/rusoto/services/cloudhsmv2/src/generated.rs index cad3eadba15..dcd535d5c55 100644 --- a/rusoto/services/cloudhsmv2/src/generated.rs +++ b/rusoto/services/cloudhsmv2/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

<p>Contains information about a backup of an AWS CloudHSM cluster.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Backup { ///

<p>The identifier (ID) of the backup.</p>
#[serde(rename = "BackupId")] @@ -63,7 +62,7 @@ pub struct Backup { ///

<p>Contains one or more certificates or a certificate signing request (CSR).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Certificates { ///

<p>The HSM hardware certificate issued (signed) by AWS CloudHSM.</p>
#[serde(rename = "AwsHardwareCertificate")] @@ -89,7 +88,7 @@ pub struct Certificates { ///

<p>Contains information about an AWS CloudHSM cluster.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Cluster { ///

<p>The cluster's backup policy.</p>
#[serde(rename = "BackupPolicy")] @@ -156,7 +155,7 @@ pub struct CopyBackupToRegionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CopyBackupToRegionResponse { ///

<p>Information on the backup that will be copied to the destination region, including CreateTimestamp, SourceBackup, SourceCluster, and Source Region. CreateTimestamp of the destination backup will be the same as that of the source backup.</p> <p>You will need to use the sourceBackupID returned in this operation to use the DescribeBackups operation on the backup that will be copied to the destination region.</p>
#[serde(rename = "DestinationBackup")] @@ -179,7 +178,7 @@ pub struct CreateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResponse { ///

<p>Information about the cluster that was created.</p>
#[serde(rename = "Cluster")] @@ -202,7 +201,7 @@ pub struct CreateHsmRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHsmResponse { ///

<p>Information about the HSM that was created.</p>
#[serde(rename = "Hsm")] @@ -218,7 +217,7 @@ pub struct DeleteBackupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBackupResponse { ///

<p>Information on the Backup object deleted.</p>
#[serde(rename = "Backup")] @@ -234,7 +233,7 @@ pub struct DeleteClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClusterResponse { ///

<p>Information about the cluster that was deleted.</p>
#[serde(rename = "Cluster")] @@ -262,7 +261,7 @@ pub struct DeleteHsmRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteHsmResponse { ///

<p>The identifier (ID) of the HSM that was deleted.</p>
#[serde(rename = "HsmId")] @@ -290,7 +289,7 @@ pub struct DescribeBackupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBackupsResponse { ///

<p>A list of backups.</p>
#[serde(rename = "Backups")] @@ -319,7 +318,7 @@ pub struct DescribeClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClustersResponse { ///

<p>A list of clusters.</p>
#[serde(rename = "Clusters")] @@ -332,7 +331,7 @@ pub struct DescribeClustersResponse { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DestinationBackup { #[serde(rename = "CreateTimestamp")] #[serde(skip_serializing_if = "Option::is_none")] @@ -350,7 +349,7 @@ pub struct DestinationBackup { ///

<p>Contains information about a hardware security module (HSM) in an AWS CloudHSM cluster.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Hsm { ///

<p>The Availability Zone that contains the HSM.</p>
#[serde(rename = "AvailabilityZone")] @@ -399,7 +398,7 @@ pub struct InitializeClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitializeClusterResponse { ///

<p>The cluster's state.</p>
#[serde(rename = "State")] @@ -427,7 +426,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

<p>An opaque string that indicates that the response contains only a subset of tags. Use this value in a subsequent ListTags request to get more tags.</p>
#[serde(rename = "NextToken")] @@ -446,7 +445,7 @@ pub struct RestoreBackupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreBackupResponse { ///

<p>Information on the Backup object created.</p>
#[serde(rename = "Backup")] @@ -476,7 +475,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -490,7 +489,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} /// Errors returned by CopyBackupToRegion @@ -1377,10 +1376,7 @@ impl CloudHsmv2Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudHsmv2Client { - CloudHsmv2Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1394,10 +1390,14 @@ impl CloudHsmv2Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudHsmv2Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudHsmv2Client { + CloudHsmv2Client { client, region } } } diff --git a/rusoto/services/cloudsearch/Cargo.toml b/rusoto/services/cloudsearch/Cargo.toml index 2b3cf5f6ddb..4a702730ebe 100644 --- a/rusoto/services/cloudsearch/Cargo.toml +++ b/rusoto/services/cloudsearch/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudsearch" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudsearch/README.md b/rusoto/services/cloudsearch/README.md index 8806a3a7585..04de33f303b 100644 --- a/rusoto/services/cloudsearch/README.md +++ b/rusoto/services/cloudsearch/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudsearch` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_cloudsearch = "0.40.0" +rusoto_cloudsearch = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
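The XML-based services in this change (CloudFront above, CloudSearch just below) also move from `xml-rs` 0.7 to 0.8 and now build their `EventReader`s with `ParserConfig::new().trim_whitespace(false)`, so whitespace in element text is preserved instead of being trimmed during response parsing. A minimal sketch of the behavior this protects, using `xml-rs` directly on a hypothetical payload (not a real AWS response):

```rust
use xml::reader::{EventReader, ParserConfig, XmlEvent};

fn main() {
    let body = b"<Comment>  padded text  </Comment>";
    let reader = EventReader::new_with_config(
        &body[..],
        ParserConfig::new().trim_whitespace(false),
    );
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            // With trim_whitespace(true) this would have been "padded text".
            assert_eq!(text, "  padded text  ");
        }
    }
}
```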
diff --git a/rusoto/services/cloudsearch/src/custom/mod.rs b/rusoto/services/cloudsearch/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloudsearch/src/custom/mod.rs +++ b/rusoto/services/cloudsearch/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloudsearch/src/generated.rs b/rusoto/services/cloudsearch/src/generated.rs index b26c56ff1a3..496e1d4e522 100644 --- a/rusoto/services/cloudsearch/src/generated.rs +++ b/rusoto/services/cloudsearch/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -5597,10 +5596,7 @@ impl CloudSearchClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudSearchClient { - CloudSearchClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5614,10 +5610,14 @@ impl CloudSearchClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudSearchClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudSearchClient { + CloudSearchClient { client, region } } } @@ -5654,7 +5654,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5705,7 +5705,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5755,7 +5755,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5806,7 +5806,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5857,7 +5857,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5908,7 +5908,7 
@@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5958,7 +5958,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6009,7 +6009,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6060,7 +6060,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6111,7 +6111,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6162,7 +6162,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6210,7 +6210,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6258,7 +6258,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6309,7 +6309,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6359,7 +6359,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6409,7 +6409,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); 
let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6457,7 +6457,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6506,7 +6506,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6557,7 +6557,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6608,7 +6608,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6656,7 +6656,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6704,7 +6704,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6752,7 +6752,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6800,7 +6800,7 @@ impl CloudSearch for CloudSearchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/cloudsearchdomain/Cargo.toml b/rusoto/services/cloudsearchdomain/Cargo.toml index 2d306eaf2e0..d224b86f3eb 100644 --- a/rusoto/services/cloudsearchdomain/Cargo.toml +++ b/rusoto/services/cloudsearchdomain/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudsearchdomain" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -21,14 +21,16 @@ serde = "1.0.2" serde_derive = "1.0.2" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" 
+default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudsearchdomain/README.md b/rusoto/services/cloudsearchdomain/README.md index c3c56c9177f..39d8e4ec00f 100644 --- a/rusoto/services/cloudsearchdomain/README.md +++ b/rusoto/services/cloudsearchdomain/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudsearchdomain` in your application, add it as a dependency in ```toml [dependencies] -rusoto_cloudsearchdomain = "0.40.0" +rusoto_cloudsearchdomain = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cloudsearchdomain/src/custom/mod.rs b/rusoto/services/cloudsearchdomain/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloudsearchdomain/src/custom/mod.rs +++ b/rusoto/services/cloudsearchdomain/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloudsearchdomain/src/generated.rs b/rusoto/services/cloudsearchdomain/src/generated.rs index c896e70238d..d462d902893 100644 --- a/rusoto/services/cloudsearchdomain/src/generated.rs +++ b/rusoto/services/cloudsearchdomain/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; ///

<p>A container for facet information.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Bucket { ///

<p>The number of hits that contain the facet value in the specified facet field.</p>
#[serde(rename = "count")] @@ -40,7 +39,7 @@ pub struct Bucket { ///

<p>A container for the calculated facet values and counts.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BucketInfo { ///

<p>A list of the calculated facet values and counts.</p>
#[serde(rename = "buckets")] @@ -50,7 +49,7 @@ pub struct BucketInfo { ///

<p>A warning returned by the document service when an issue is discovered while processing an upload request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentServiceWarning { ///

<p>The description for a warning returned by the document service.</p>
#[serde(rename = "message")] @@ -60,7 +59,7 @@ pub struct DocumentServiceWarning { ///

<p>The statistics for a field calculated in the request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FieldStats { ///

<p>The number of documents that contain a value in the specified field in the result set.</p>
#[serde(rename = "count")] @@ -98,7 +97,7 @@ pub struct FieldStats { ///

<p>Information about a document that matches the search request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Hit { ///

<p>The expressions returned from a document that matches the search request.</p>
#[serde(rename = "exprs")] @@ -120,7 +119,7 @@ pub struct Hit { ///

<p>The collection of documents that match the search request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Hits { ///

<p>A cursor that can be used to retrieve the next set of matching documents when you want to page through a large result set.</p>
#[serde(rename = "cursor")] @@ -202,7 +201,7 @@ pub struct SearchRequest { ///

<p>The result of a Search request. Contains the documents that match the specified search criteria and any requested fields, highlights, and facet information.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchResponse { ///

<p>The requested facet information.</p>
#[serde(rename = "facets")] @@ -224,7 +223,7 @@ pub struct SearchResponse { ///

<p>Contains the resource id (rid) and the time it took to process the request (timems).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchStatus { ///

<p>The encrypted resource ID for the request.</p>
#[serde(rename = "rid")] @@ -238,7 +237,7 @@ pub struct SearchStatus { ///

<p>Container for the suggestion information returned in a SuggestResponse.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SuggestModel { ///

<p>The number of documents that were found to match the query string.</p>
#[serde(rename = "found")] @@ -271,7 +270,7 @@ pub struct SuggestRequest { ///

<p>Contains the response to a Suggest request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SuggestResponse { ///

<p>The status of a SuggestRequest. Contains the resource ID (rid) and how long it took to process the request (timems).</p>
#[serde(rename = "status")] @@ -285,7 +284,7 @@ pub struct SuggestResponse { ///

<p>Contains the resource id (rid) and the time it took to process the request (timems).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SuggestStatus { ///

<p>The encrypted resource ID for the request.</p>
#[serde(rename = "rid")] @@ -299,7 +298,7 @@ pub struct SuggestStatus { ///

<p>An autocomplete suggestion that matches the query string specified in a SuggestRequest.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SuggestionMatch { ///

<p>The document ID of the suggested document.</p>
#[serde(rename = "id")] @@ -333,7 +332,7 @@ pub struct UploadDocumentsRequest { ///

<p>Contains the response to an UploadDocuments request.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UploadDocumentsResponse { ///

<p>The number of documents that were added to the search domain.</p>
#[serde(rename = "adds")] @@ -474,10 +473,7 @@ impl CloudSearchDomainClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudSearchDomainClient { - CloudSearchDomainClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -491,10 +487,14 @@ impl CloudSearchDomainClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudSearchDomainClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudSearchDomainClient { + CloudSearchDomainClient { client, region } } } @@ -549,7 +549,8 @@ impl CloudSearchDomain for CloudSearchDomainClient { if let Some(ref x) = input.stats { params.put("stats", x); } - params.put("format", "sdk&pretty"); + params.put("format", "sdk"); + params.put("pretty", "true"); request.set_params(params); self.client.sign_and_dispatch(request, |response| { @@ -586,7 +587,8 @@ impl CloudSearchDomain for CloudSearchDomainClient { params.put("size", x); } params.put("suggester", &input.suggester); - params.put("format", "sdk&pretty"); + params.put("format", "sdk"); + params.put("pretty", "true"); request.set_params(params); self.client.sign_and_dispatch(request, |response| { diff --git a/rusoto/services/cloudtrail/Cargo.toml b/rusoto/services/cloudtrail/Cargo.toml index 27501b8a516..dcbed884ef5 100644 --- a/rusoto/services/cloudtrail/Cargo.toml +++ b/rusoto/services/cloudtrail/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudtrail" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudtrail/README.md b/rusoto/services/cloudtrail/README.md index c4b137c8519..c8a27415630 100644 --- a/rusoto/services/cloudtrail/README.md +++ b/rusoto/services/cloudtrail/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudtrail` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_cloudtrail = "0.40.0" +rusoto_cloudtrail = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
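The CloudSearchDomain change above also fixes how the `format` and `pretty` query parameters are sent: the old code put the single value `"sdk&pretty"` under `format`, so the literal `&` was percent-encoded when the query string was assembled and the service never saw a separate `pretty` flag. The fix sends `format=sdk` and `pretty=true` as two parameters. A small sketch of the difference, using `serde_urlencoded` with hypothetical pairs:

```rust
fn main() {
    // One parameter whose value contains '&': the ampersand gets escaped.
    let joined = serde_urlencoded::to_string(&[("format", "sdk&pretty")]).unwrap();
    assert_eq!(joined, "format=sdk%26pretty");

    // Two separate parameters, as the fixed code now sends them.
    let split =
        serde_urlencoded::to_string(&[("format", "sdk"), ("pretty", "true")]).unwrap();
    assert_eq!(split, "format=sdk&pretty=true");
}
```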
diff --git a/rusoto/services/cloudtrail/src/custom/mod.rs b/rusoto/services/cloudtrail/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cloudtrail/src/custom/mod.rs +++ b/rusoto/services/cloudtrail/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cloudtrail/src/generated.rs b/rusoto/services/cloudtrail/src/generated.rs index c56b0c2ea6c..2ddf7333bf6 100644 --- a/rusoto/services/cloudtrail/src/generated.rs +++ b/rusoto/services/cloudtrail/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -38,7 +37,7 @@ pub struct AddTagsRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsResponse {} ///

<p>Specifies the settings for each trail.</p>
@@ -90,7 +89,7 @@ pub struct CreateTrailRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTrailResponse { ///

<p>Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.</p>
#[serde(rename = "CloudWatchLogsLogGroupArn")] @@ -165,7 +164,7 @@ pub struct DeleteTrailRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTrailResponse {} ///

<p>Returns information about the trail.</p>
@@ -183,7 +182,7 @@ pub struct DescribeTrailsRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTrailsResponse { ///

<p>The list of trail objects.</p>
#[serde(rename = "trailList")] @@ -193,7 +192,7 @@ pub struct DescribeTrailsResponse { ///

<p>Contains information about an event that was returned by a lookup request. The result includes a representation of a CloudTrail event.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Event { ///

<p>The AWS access key ID that was used to sign the request. If the request was made with temporary security credentials, this is the access key ID of the temporary credentials.</p>
#[serde(rename = "AccessKeyId")] @@ -258,7 +257,7 @@ pub struct GetEventSelectorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEventSelectorsResponse { ///

<p>The event selectors that are configured for the trail.</p>
#[serde(rename = "EventSelectors")] @@ -280,7 +279,7 @@ pub struct GetTrailStatusRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTrailStatusResponse { ///

<p>Whether the CloudTrail is currently logging AWS API calls.</p>
#[serde(rename = "IsLogging")] @@ -371,7 +370,7 @@ pub struct ListPublicKeysRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPublicKeysResponse { ///

<p>Reserved for future use.</p>
#[serde(rename = "NextToken")] @@ -397,7 +396,7 @@ pub struct ListTagsRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

<p>Reserved for future use.</p>
#[serde(rename = "NextToken")] @@ -447,7 +446,7 @@ pub struct LookupEventsRequest { ///

<p>Contains a response to a LookupEvents action.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LookupEventsResponse { ///

<p>A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.</p>
#[serde(rename = "Events")] @@ -461,7 +460,7 @@ pub struct LookupEventsResponse { ///

<p>Contains information about a returned public key.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PublicKey { ///

<p>The fingerprint of the public key.</p>
#[serde(rename = "Fingerprint")] @@ -497,7 +496,7 @@ pub struct PutEventSelectorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEventSelectorsResponse { ///

<p>Specifies the event selectors configured for your trail.</p>
#[serde(rename = "EventSelectors")] @@ -523,12 +522,12 @@ pub struct RemoveTagsRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsResponse {} ///

<p>Specifies the type and name of a resource referenced by an event.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///

<p>The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" for an EC2 Instance.</p>
#[serde(rename = "ResourceName")] @@ -542,7 +541,7 @@ pub struct Resource { ///

<p>A resource tag.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceTag { ///

<p>Specifies the ARN of the resource.</p>
#[serde(rename = "ResourceId")] @@ -564,7 +563,7 @@ pub struct StartLoggingRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartLoggingResponse {} ///

<p>Passes the request to CloudTrail to stop logging AWS API calls for the specified account.</p>
@@ -577,7 +576,7 @@ pub struct StopLoggingRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopLoggingResponse {} ///

<p>A custom key-value pair associated with a resource such as a CloudTrail trail.</p>
@@ -594,7 +593,7 @@ pub struct Tag { ///

<p>The settings for a trail.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Trail { ///

<p>Specifies an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered.</p>
#[serde(rename = "CloudWatchLogsLogGroupArn")] @@ -704,7 +703,7 @@ pub struct UpdateTrailRequest { ///

<p>Returns the objects or data listed below if successful. Otherwise, returns an error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTrailResponse { ///

<p>Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.</p>
#[serde(rename = "CloudWatchLogsLogGroupArn")] @@ -2034,10 +2033,7 @@ impl CloudTrailClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudTrailClient { - CloudTrailClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2051,10 +2047,14 @@ impl CloudTrailClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudTrailClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudTrailClient { + CloudTrailClient { client, region } } } diff --git a/rusoto/services/cloudwatch/Cargo.toml b/rusoto/services/cloudwatch/Cargo.toml index f815ed82202..d9782be276e 100644 --- a/rusoto/services/cloudwatch/Cargo.toml +++ b/rusoto/services/cloudwatch/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cloudwatch" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cloudwatch/README.md b/rusoto/services/cloudwatch/README.md index 11dea400e7b..c8f26a34a95 100644 --- a/rusoto/services/cloudwatch/README.md +++ b/rusoto/services/cloudwatch/README.md @@ -23,9 +23,16 @@ To use `rusoto_cloudwatch` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_cloudwatch = "0.40.0" +rusoto_cloudwatch = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
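Each service client in this change gains a public `new_with_client` constructor, with `new` and `new_with` now delegating to it. This allows several service clients to be built on top of one shared `rusoto_core::Client`, reusing its connection pool and credential provider. A minimal usage sketch with the `CloudTrailClient` refactored above:

```rust
use rusoto_cloudtrail::CloudTrailClient;
use rusoto_core::{Client, Region};

fn main() {
    // Reuses the process-wide shared client; equivalent to CloudTrailClient::new.
    let client = CloudTrailClient::new_with_client(Client::shared(), Region::UsEast1);
    let _ = client;
}
```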
diff --git a/rusoto/services/cloudwatch/src/custom/custom_tests.rs b/rusoto/services/cloudwatch/src/custom/custom_tests.rs index 5c042817406..9ba029c5931 100644 --- a/rusoto/services/cloudwatch/src/custom/custom_tests.rs +++ b/rusoto/services/cloudwatch/src/custom/custom_tests.rs @@ -1,13 +1,13 @@ extern crate rusoto_mock; -use crate::generated::{CloudWatch, CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum}; +use crate::generated::{CloudWatch, CloudWatchClient, Dimension, MetricDatum, PutMetricDataInput}; -use rusoto_core::Region; +use self::rusoto_mock::*; +use rusoto_core::param::Params; use rusoto_core::signature::SignedRequest; use rusoto_core::signature::SignedRequestPayload; -use rusoto_core::param::Params; +use rusoto_core::Region; use serde_urlencoded; -use self::rusoto_mock::*; #[test] fn should_serialize_complex_metric_data_params() { @@ -18,35 +18,46 @@ fn should_serialize_complex_metric_data_params() { assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = serde_urlencoded::from_bytes(buffer).unwrap(); - assert_eq!(params.get("Namespace"), - Some(&Some("TestNamespace".to_owned()))); - assert_eq!(params.get("MetricData.member.1.MetricName"), - Some(&Some("buffers".to_owned()))); - assert_eq!(params.get("MetricData.member.1.Unit"), - Some(&Some("Bytes".to_owned()))); - assert_eq!(params.get("MetricData.member.1.Value"), - Some(&Some("1".to_owned()))); - assert_eq!(params.get("MetricData.member.1.Dimensions.member.1.Name"), - Some(&Some("foo".to_owned()))); - assert_eq!(params.get("MetricData.member.1.Dimensions.member.1.Value"), - Some(&Some("bar".to_owned()))); + assert_eq!( + params.get("Namespace"), + Some(&Some("TestNamespace".to_owned())) + ); + assert_eq!( + params.get("MetricData.member.1.MetricName"), + Some(&Some("buffers".to_owned())) + ); + assert_eq!( + params.get("MetricData.member.1.Unit"), + Some(&Some("Bytes".to_owned())) + ); + assert_eq!( + params.get("MetricData.member.1.Value"), + Some(&Some("1".to_owned())) + ); + assert_eq!( + params.get("MetricData.member.1.Dimensions.member.1.Name"), + Some(&Some("foo".to_owned())) + ); + assert_eq!( + params.get("MetricData.member.1.Dimensions.member.1.Value"), + Some(&Some("bar".to_owned())) + ); } else { panic!("Unexpected request.payload: {:?}", request.payload); } - }); let metric_data = vec![MetricDatum { - dimensions: Some(vec![Dimension { - name: "foo".to_string(), - value: "bar".to_string(), - }]), - metric_name: "buffers".to_string(), - statistic_values: None, - timestamp: None, - unit: Some("Bytes".to_string()), - value: Some(1.0), - ..Default::default() - }]; + dimensions: Some(vec![Dimension { + name: "foo".to_string(), + value: "bar".to_string(), + }]), + metric_name: "buffers".to_string(), + statistic_values: None, + timestamp: None, + unit: Some("Bytes".to_string()), + value: Some(1.0), + ..Default::default() + }]; let request = PutMetricDataInput { namespace: "TestNamespace".to_string(), metric_data: metric_data, diff --git a/rusoto/services/cloudwatch/src/custom/mod.rs b/rusoto/services/cloudwatch/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/cloudwatch/src/custom/mod.rs +++ b/rusoto/services/cloudwatch/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/cloudwatch/src/generated.rs b/rusoto/services/cloudwatch/src/generated.rs index 2f52864efdb..94527ba2d31 100644 --- 
a/rusoto/services/cloudwatch/src/generated.rs +++ b/rusoto/services/cloudwatch/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -159,6 +158,181 @@ impl AlarmNamesSerializer { } } +///

An anomaly detection model associated with a particular CloudWatch metric and statistic. You can use the model to display a band of expected normal values when the metric is graphed.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct AnomalyDetector { + ///

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude from use for training the model, and the time zone to use for the metric.

+ pub configuration: Option<AnomalyDetectorConfiguration>, + ///

The metric dimensions associated with the anomaly detection model.

+ pub dimensions: Option<Vec<Dimension>>, + ///

The name of the metric associated with the anomaly detection model.

+ pub metric_name: Option<String>, + ///

The namespace of the metric associated with the anomaly detection model.

+ pub namespace: Option<String>, + ///

The statistic associated with the anomaly detection model.

+ pub stat: Option<String>, +} + +struct AnomalyDetectorDeserializer; +impl AnomalyDetectorDeserializer { + #[allow(unused_variables)] + fn deserialize<T: Peek + Next>( + tag_name: &str, + stack: &mut T, + ) -> Result<AnomalyDetector, XmlParseError> { + deserialize_elements::<_, AnomalyDetector, _>(tag_name, stack, |name, stack, obj| { + match name { + "Configuration" => { + obj.configuration = + Some(AnomalyDetectorConfigurationDeserializer::deserialize( + "Configuration", + stack, + )?); + } + "Dimensions" => { + obj.dimensions + .get_or_insert(vec![]) + .extend(DimensionsDeserializer::deserialize("Dimensions", stack)?); + } + "MetricName" => { + obj.metric_name = + Some(MetricNameDeserializer::deserialize("MetricName", stack)?); + } + "Namespace" => { + obj.namespace = Some(NamespaceDeserializer::deserialize("Namespace", stack)?); + } + "Stat" => { + obj.stat = Some(StatDeserializer::deserialize("Stat", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +///

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude from use for training the model and the time zone to use for the metric.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct AnomalyDetectorConfiguration { + ///

An array of time ranges to exclude from use when the anomaly detection model is trained. Use this to make sure that events that could cause unusual values for the metric, such as deployments, aren't used when CloudWatch creates the model.

+ pub excluded_time_ranges: Option<Vec<Range>>, + ///

The time zone to use for the metric. This is useful to enable the model to automatically account for daylight savings time changes if the metric is sensitive to such time changes.

To specify a time zone, use the name of the time zone as specified in the standard tz database. For more information, see tz database.

+ pub metric_timezone: Option, +} + +struct AnomalyDetectorConfigurationDeserializer; +impl AnomalyDetectorConfigurationDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, AnomalyDetectorConfiguration, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "ExcludedTimeRanges" => { + obj.excluded_time_ranges.get_or_insert(vec![]).extend( + AnomalyDetectorExcludedTimeRangesDeserializer::deserialize( + "ExcludedTimeRanges", + stack, + )?, + ); + } + "MetricTimezone" => { + obj.metric_timezone = + Some(AnomalyDetectorMetricTimezoneDeserializer::deserialize( + "MetricTimezone", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `AnomalyDetectorConfiguration` contents to a `SignedRequest`. +struct AnomalyDetectorConfigurationSerializer; +impl AnomalyDetectorConfigurationSerializer { + fn serialize(params: &mut Params, name: &str, obj: &AnomalyDetectorConfiguration) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.excluded_time_ranges { + AnomalyDetectorExcludedTimeRangesSerializer::serialize( + params, + &format!("{}{}", prefix, "ExcludedTimeRanges"), + field_value, + ); + } + if let Some(ref field_value) = obj.metric_timezone { + params.put(&format!("{}{}", prefix, "MetricTimezone"), &field_value); + } + } +} + +struct AnomalyDetectorExcludedTimeRangesDeserializer; +impl AnomalyDetectorExcludedTimeRangesDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "member" { + obj.push(RangeDeserializer::deserialize("member", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} + +/// Serialize `AnomalyDetectorExcludedTimeRanges` contents to a `SignedRequest`. +struct AnomalyDetectorExcludedTimeRangesSerializer; +impl AnomalyDetectorExcludedTimeRangesSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.member.{}", name, index + 1); + RangeSerializer::serialize(params, &key, obj); + } + } +} + +struct AnomalyDetectorMetricTimezoneDeserializer; +impl AnomalyDetectorMetricTimezoneDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} +struct AnomalyDetectorsDeserializer; +impl AnomalyDetectorsDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "member" { + obj.push(AnomalyDetectorDeserializer::deserialize("member", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} struct ComparisonOperatorDeserializer; impl ComparisonOperatorDeserializer { #[allow(unused_variables)] @@ -525,6 +699,59 @@ impl DeleteAlarmsInputSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteAnomalyDetectorInput { + ///

The metric dimensions associated with the anomaly detection model to delete.

+ pub dimensions: Option<Vec<Dimension>>, + ///

The metric name associated with the anomaly detection model to delete.

+ pub metric_name: String, + ///

The namespace associated with the anomaly detection model to delete.

+ pub namespace: String, + ///

The statistic associated with the anomaly detection model to delete.

+ pub stat: String, +} + +/// Serialize `DeleteAnomalyDetectorInput` contents to a `SignedRequest`. +struct DeleteAnomalyDetectorInputSerializer; +impl DeleteAnomalyDetectorInputSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DeleteAnomalyDetectorInput) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dimensions { + DimensionsSerializer::serialize( + params, + &format!("{}{}", prefix, "Dimensions"), + field_value, + ); + } + params.put(&format!("{}{}", prefix, "MetricName"), &obj.metric_name); + params.put(&format!("{}{}", prefix, "Namespace"), &obj.namespace); + params.put(&format!("{}{}", prefix, "Stat"), &obj.stat); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteAnomalyDetectorOutput {} + +struct DeleteAnomalyDetectorOutputDeserializer; +impl DeleteAnomalyDetectorOutputDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + start_element(tag_name, stack)?; + + let obj = DeleteAnomalyDetectorOutput::default(); + + end_element(tag_name, stack)?; + + Ok(obj) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDashboardsInput { ///

The dashboards to be deleted. This parameter is required.

@@ -811,6 +1038,87 @@ impl DescribeAlarmsOutputDeserializer { }) } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeAnomalyDetectorsInput { + ///

Limits the results to only the anomaly detection models that are associated with the specified metric dimensions. If there are multiple metrics that have these dimensions and have anomaly detection models associated, they're all returned.

+ pub dimensions: Option<Vec<Dimension>>, + ///

The maximum number of results to return in one operation. The maximum value you can specify is 10.

To retrieve the remaining results, make another call with the returned NextToken value.

+ pub max_results: Option<i64>, + ///

Limits the results to only the anomaly detection models that are associated with the specified metric name. If there are multiple metrics with this name in different namespaces that have anomaly detection models, they're all returned.

+ pub metric_name: Option<String>, + ///

Limits the results to only the anomaly detection models that are associated with the specified namespace.

+ pub namespace: Option<String>, + ///

Use the token returned by the previous operation to request the next page of results.

+ pub next_token: Option, +} + +/// Serialize `DescribeAnomalyDetectorsInput` contents to a `SignedRequest`. +struct DescribeAnomalyDetectorsInputSerializer; +impl DescribeAnomalyDetectorsInputSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeAnomalyDetectorsInput) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dimensions { + DimensionsSerializer::serialize( + params, + &format!("{}{}", prefix, "Dimensions"), + field_value, + ); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.metric_name { + params.put(&format!("{}{}", prefix, "MetricName"), &field_value); + } + if let Some(ref field_value) = obj.namespace { + params.put(&format!("{}{}", prefix, "Namespace"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeAnomalyDetectorsOutput { + ///

The list of anomaly detection models returned by the operation.

+ pub anomaly_detectors: Option<Vec<AnomalyDetector>>, + ///

A token that you can use in a subsequent operation to retrieve the next set of results.

+ pub next_token: Option, +} + +struct DescribeAnomalyDetectorsOutputDeserializer; +impl DescribeAnomalyDetectorsOutputDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DescribeAnomalyDetectorsOutput, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "AnomalyDetectors" => { + obj.anomaly_detectors.get_or_insert(vec![]).extend( + AnomalyDetectorsDeserializer::deserialize("AnomalyDetectors", stack)?, + ); + } + "NextToken" => { + obj.next_token = + Some(NextTokenDeserializer::deserialize("NextToken", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} ///

Expands the identity of a metric.

#[derive(Default, Debug, Clone, PartialEq)] pub struct Dimension { @@ -1101,7 +1409,7 @@ impl GetDashboardOutputDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct GetMetricDataInput { - ///

The time stamp indicating the latest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the EndTime.

+ ///

The time stamp indicating the latest data to be returned.

The value specified is exclusive; results include data points up to the specified time stamp.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the EndTime.

pub end_time: String, ///

The maximum number of data points the request should return before paginating. If you omit this, the default of 100,800 is used.

pub max_datapoints: Option<i64>, @@ -1111,7 +1419,7 @@ pub struct GetMetricDataInput { pub next_token: Option<String>, ///

The order in which data points should be returned. TimestampDescending returns the newest data first and paginates when the MaxDatapoints limit is reached. TimestampAscending returns the oldest data first and paginates when the MaxDatapoints limit is reached.

pub scan_by: Option<String>, - ///

The time stamp indicating the earliest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

+ ///

The time stamp indicating the earliest data to be returned.

The value specified is inclusive; results include data points with the specified time stamp.

CloudWatch rounds the specified time stamp as follows:

  • Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.

  • Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.

  • Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.

If you set Period to 5, 10, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

pub start_time: String, } @@ -1199,7 +1507,7 @@ pub struct GetMetricStatisticsInput { pub start_time: String, ///

The metric statistics, other than percentile. For percentile statistics, use ExtendedStatistics. When calling GetMetricStatistics, you must specify either Statistics or ExtendedStatistics, but not both.

pub statistics: Option<Vec<String>>, - ///

The unit for a given metric. Metrics may be reported in multiple units. Not supplying a unit results in all units being returned. If you specify only a unit that the metric does not report, the results of the call are null.

+ ///

The unit for a given metric. If you omit Unit, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

pub unit: Option<String>, } @@ -1711,9 +2019,9 @@ pub struct MetricAlarm { pub extended_statistic: Option<String>, ///

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

pub insufficient_data_actions: Option<Vec<String>>, - ///

The name of the metric associated with the alarm.

+ ///

The name of the metric associated with the alarm, if this is an alarm based on a single metric.

pub metric_name: Option<String>, - ///

+ ///

An array of MetricDataQuery structures, used in an alarm based on a metric math expression. Each structure either retrieves a metric or performs a math expression. One item in the Metrics array is the math expression that the alarm watches. This expression is designated by having ReturnValue set to true.

pub metrics: Option<Vec<MetricDataQuery>>, ///

The namespace of the metric associated with the alarm.

pub namespace: Option<String>, @@ -1733,6 +2041,8 @@ pub struct MetricAlarm { pub statistic: Option<String>, ///

The value to compare with the specified statistic.

pub threshold: Option<f64>, + ///

In an alarm based on an anomaly detection model, this is the ID of the ANOMALY_DETECTION_BAND function used as the threshold for the alarm.

+ pub threshold_metric_id: Option<String>, ///

Sets how this alarm is to handle missing data points. If this parameter is omitted, the default behavior of missing is used.

pub treat_missing_data: Option<String>, ///

The unit of the metric associated with the alarm.

@@ -1865,6 +2175,12 @@ impl MetricAlarmDeserializer { "Threshold" => { obj.threshold = Some(ThresholdDeserializer::deserialize("Threshold", stack)?); } + "ThresholdMetricId" => { + obj.threshold_metric_id = Some(MetricIdDeserializer::deserialize( + "ThresholdMetricId", + stack, + )?); + } "TreatMissingData" => { obj.treat_missing_data = Some(TreatMissingDataDeserializer::deserialize( "TreatMissingData", @@ -2124,7 +2440,7 @@ pub struct MetricDatum { pub storage_resolution: Option, ///

The time the metric data was received, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

pub timestamp: Option<String>, - ///

The unit of the metric.

+ ///

When you are using a Put operation, this defines what unit you want to use when storing the metric.

In a Get operation, this displays the unit that is used for the metric.

pub unit: Option<String>, ///

The value for the metric.

Although the parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

pub value: Option<f64>, @@ -2230,7 +2546,7 @@ pub struct MetricStat { pub period: i64, ///

The statistic to return. It can include any CloudWatch statistic or extended statistic.

pub stat: String, - ///

The unit to use for the returned data points.

+ ///

When you are using a Put operation, this defines what unit you want to use when storing the metric.

In a Get operation, if you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

pub unit: Option<String>, } @@ -2345,41 +2661,103 @@ impl PeriodDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] -pub struct PutDashboardInput { - ///

The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard. This parameter is required.

For more information about the syntax, see CloudWatch-Dashboard-Body-Structure.

- pub dashboard_body: String, - ///

The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, "-", and "_". This parameter is required.

- pub dashboard_name: String, +pub struct PutAnomalyDetectorInput { + ///

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude when training and updating the model. You can specify as many as 10 time ranges.

The configuration can also include the time zone to use for the metric.

+ pub configuration: Option<AnomalyDetectorConfiguration>, + ///

The metric dimensions to create the anomaly detection model for.

+ pub dimensions: Option<Vec<Dimension>>, + ///

The name of the metric to create the anomaly detection model for.

+ pub metric_name: String, + ///

The namespace of the metric to create the anomaly detection model for.

+ pub namespace: String, + ///

The statistic to use for the metric and the anomaly detection model.

+ pub stat: String, } -/// Serialize `PutDashboardInput` contents to a `SignedRequest`. -struct PutDashboardInputSerializer; -impl PutDashboardInputSerializer { - fn serialize(params: &mut Params, name: &str, obj: &PutDashboardInput) { +/// Serialize `PutAnomalyDetectorInput` contents to a `SignedRequest`. +struct PutAnomalyDetectorInputSerializer; +impl PutAnomalyDetectorInputSerializer { + fn serialize(params: &mut Params, name: &str, obj: &PutAnomalyDetectorInput) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } - params.put( - &format!("{}{}", prefix, "DashboardBody"), - &obj.dashboard_body, - ); - params.put( - &format!("{}{}", prefix, "DashboardName"), - &obj.dashboard_name, - ); + if let Some(ref field_value) = obj.configuration { + AnomalyDetectorConfigurationSerializer::serialize( + params, + &format!("{}{}", prefix, "Configuration"), + field_value, + ); + } + if let Some(ref field_value) = obj.dimensions { + DimensionsSerializer::serialize( + params, + &format!("{}{}", prefix, "Dimensions"), + field_value, + ); + } + params.put(&format!("{}{}", prefix, "MetricName"), &obj.metric_name); + params.put(&format!("{}{}", prefix, "Namespace"), &obj.namespace); + params.put(&format!("{}{}", prefix, "Stat"), &obj.stat); } } #[derive(Default, Debug, Clone, PartialEq)] -pub struct PutDashboardOutput { - ///

If the input for PutDashboard was correct and the dashboard was successfully created or modified, this result is empty.

If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard may not render.

If this result includes error messages, the input was not valid and the operation failed.

- pub dashboard_validation_messages: Option>, -} +pub struct PutAnomalyDetectorOutput {} -struct PutDashboardOutputDeserializer; -impl PutDashboardOutputDeserializer { +struct PutAnomalyDetectorOutputDeserializer; +impl PutAnomalyDetectorOutputDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + start_element(tag_name, stack)?; + + let obj = PutAnomalyDetectorOutput::default(); + + end_element(tag_name, stack)?; + + Ok(obj) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct PutDashboardInput { + ///

The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard. This parameter is required.

For more information about the syntax, see CloudWatch-Dashboard-Body-Structure.

+ pub dashboard_body: String, + ///

The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, "-", and "_". This parameter is required.

+ pub dashboard_name: String, +} + +/// Serialize `PutDashboardInput` contents to a `SignedRequest`. +struct PutDashboardInputSerializer; +impl PutDashboardInputSerializer { + fn serialize(params: &mut Params, name: &str, obj: &PutDashboardInput) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put( + &format!("{}{}", prefix, "DashboardBody"), + &obj.dashboard_body, + ); + params.put( + &format!("{}{}", prefix, "DashboardName"), + &obj.dashboard_name, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct PutDashboardOutput { + ///

If the input for PutDashboard was correct and the dashboard was successfully created or modified, this result is empty.

If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard may not render.

If this result includes error messages, the input was not valid and the operation failed.

+ pub dashboard_validation_messages: Option<Vec<DashboardValidationMessage>>, +} + +struct PutDashboardOutputDeserializer; +impl PutDashboardOutputDeserializer { #[allow(unused_variables)] fn deserialize( tag_name: &str, @@ -2403,7 +2781,7 @@ impl PutDashboardOutputDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct PutMetricAlarmInput { - ///

Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.

+ ///

Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.

pub actions_enabled: Option<bool>, ///

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-idautoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

pub alarm_actions: Option<Vec<String>>, @@ -2411,7 +2789,7 @@ pub struct PutMetricAlarmInput { pub alarm_description: Option<String>, ///

The name for the alarm. This name must be unique within your AWS account.

pub alarm_name: String, - ///

The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.

+ ///

The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.

The values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, and GreaterThanUpperThreshold are used only for alarms based on anomaly detection models.

pub comparison_operator: String, ///

The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an "M out of N" alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide.

pub datapoints_to_alarm: Option<i64>, @@ -2425,25 +2803,27 @@ pub struct PutMetricAlarmInput { pub extended_statistic: Option<String>, ///

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-idautoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

pub insufficient_data_actions: Option<Vec<String>>, - ///

The name for the metric associated with the alarm.

If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters. Instead, you specify all this information in the Metrics array.

+ ///

The name for the metric associated with the alarm. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters. Instead, you specify all this information in the Metrics array.

pub metric_name: Option<String>, - ///

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnValue to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

+ ///

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnValue to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

pub metrics: Option<Vec<MetricDataQuery>>, ///

The namespace for the metric specified in MetricName.

pub namespace: Option<String>, ///

The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-idautoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

pub ok_actions: Option<Vec<String>>, - ///

The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.

Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.

+ ///

The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.

Period is required for alarms based on static thresholds. If you are creating an alarm based on a metric math expression, you specify the period for each metric within the objects in the Metrics array.

Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.

pub period: Option<i64>, ///

The statistic for the metric specified in MetricName, other than percentile. For percentile statistics, use ExtendedStatistic. When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or ExtendedStatistic, but not both.

pub statistic: Option<String>, ///

A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

pub tags: Option<Vec<Tag>>, - ///

The value against which the specified statistic is compared.

- pub threshold: f64, + ///

The value against which the specified statistic is compared.

This parameter is required for alarms based on static thresholds, but should not be used for alarms based on anomaly detection models.

+ pub threshold: Option<f64>, + ///

If this is an alarm based on an anomaly detection model, make this value match the ID of the ANOMALY_DETECTION_BAND function.

For an example of how to use this parameter, see the Anomaly Detection Model Alarm example on this page.

If your alarm uses this parameter, it cannot have Auto Scaling actions.

+ pub threshold_metric_id: Option<String>, ///

Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data.

Valid Values: breaching | notBreaching | ignore | missing

pub treat_missing_data: Option<String>, - ///

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the CloudWatch alarm can get stuck in the INSUFFICIENT DATA state.

+ ///

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually metrics are published with only one unit, so the alarm will work as intended.

However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves unpredictably.

We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.

pub unit: Option<String>, } @@ -2533,7 +2913,12 @@ impl PutMetricAlarmInputSerializer { if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tags"), field_value); } - params.put(&format!("{}{}", prefix, "Threshold"), &obj.threshold); + if let Some(ref field_value) = obj.threshold { + params.put(&format!("{}{}", prefix, "Threshold"), &field_value); + } + if let Some(ref field_value) = obj.threshold_metric_id { + params.put(&format!("{}{}", prefix, "ThresholdMetricId"), &field_value); + } if let Some(ref field_value) = obj.treat_missing_data { params.put(&format!("{}{}", prefix, "TreatMissingData"), &field_value); } @@ -2547,7 +2932,7 @@ pub struct PutMetricDataInput { ///

The data for the metric. The array can include no more than 20 metrics per call.

pub metric_data: Vec<MetricDatum>, - ///

The namespace for the metric data.

You cannot specify a namespace that begins with "AWS/". Namespaces that begin with "AWS/" are reserved for use by Amazon Web Services products.

+ ///

The namespace for the metric data.

To avoid conflicts with AWS service namespaces, you should not specify a namespace that begins with AWS/.

pub namespace: String, } @@ -2569,6 +2954,48 @@ impl PutMetricDataInputSerializer { } } +///

Specifies one range of days or times to exclude from use for training an anomaly detection model.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Range { + ///

The end time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59.

+ pub end_time: String, + ///

The start time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59.

+ pub start_time: String, +} + +struct RangeDeserializer; +impl RangeDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + deserialize_elements::<_, Range, _>(tag_name, stack, |name, stack, obj| { + match name { + "EndTime" => { + obj.end_time = TimestampDeserializer::deserialize("EndTime", stack)?; + } + "StartTime" => { + obj.start_time = TimestampDeserializer::deserialize("StartTime", stack)?; + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} + +/// Serialize `Range` contents to a `SignedRequest`. +struct RangeSerializer; +impl RangeSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Range) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put(&format!("{}{}", prefix, "EndTime"), &obj.end_time); + params.put(&format!("{}{}", prefix, "StartTime"), &obj.start_time); + } +} + struct ResourceListDeserializer; impl ResourceListDeserializer { #[allow(unused_variables)] @@ -3074,6 +3501,79 @@ impl Error for DeleteAlarmsError { } } } +/// Errors returned by DeleteAnomalyDetector +#[derive(Debug, PartialEq)] +pub enum DeleteAnomalyDetectorError { + ///

Request processing has failed due to some unknown error, exception, or failure.

+ InternalServiceFault(String), + ///

The value of an input parameter is bad or out-of-range.

+ InvalidParameterValue(String), + ///

An input parameter that is required is missing.

+ MissingRequiredParameter(String), + ///

The named resource does not exist.

+ ResourceNotFound(String), +} + +impl DeleteAnomalyDetectorError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "InternalServiceError" => { + return RusotoError::Service( + DeleteAnomalyDetectorError::InternalServiceFault(parsed_error.message), + ) + } + "InvalidParameterValue" => { + return RusotoError::Service( + DeleteAnomalyDetectorError::InvalidParameterValue(parsed_error.message), + ) + } + "MissingParameter" => { + return RusotoError::Service( + DeleteAnomalyDetectorError::MissingRequiredParameter( + parsed_error.message, + ), + ) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DeleteAnomalyDetectorError::ResourceNotFound( + parsed_error.message, + )) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DeleteAnomalyDetectorError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteAnomalyDetectorError { + fn description(&self) -> &str { + match *self { + DeleteAnomalyDetectorError::InternalServiceFault(ref cause) => cause, + DeleteAnomalyDetectorError::InvalidParameterValue(ref cause) => cause, + DeleteAnomalyDetectorError::MissingRequiredParameter(ref cause) => cause, + DeleteAnomalyDetectorError::ResourceNotFound(ref cause) => cause, + } + } +} /// Errors returned by DeleteDashboards #[derive(Debug, PartialEq)] pub enum DeleteDashboardsError { @@ -3268,6 +3768,73 @@ impl Error for DescribeAlarmsForMetricError { match *self {} } } +/// Errors returned by DescribeAnomalyDetectors +#[derive(Debug, PartialEq)] +pub enum DescribeAnomalyDetectorsError { + ///

Request processing has failed due to some unknown error, exception, or failure.

+ InternalServiceFault(String), + ///

The next token specified is invalid.

+ InvalidNextToken(String), + ///

The value of an input parameter is bad or out-of-range.

+ InvalidParameterValue(String), +} + +impl DescribeAnomalyDetectorsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "InternalServiceError" => { + return RusotoError::Service( + DescribeAnomalyDetectorsError::InternalServiceFault( + parsed_error.message, + ), + ) + } + "InvalidNextToken" => { + return RusotoError::Service( + DescribeAnomalyDetectorsError::InvalidNextToken(parsed_error.message), + ) + } + "InvalidParameterValue" => { + return RusotoError::Service( + DescribeAnomalyDetectorsError::InvalidParameterValue( + parsed_error.message, + ), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DescribeAnomalyDetectorsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeAnomalyDetectorsError { + fn description(&self) -> &str { + match *self { + DescribeAnomalyDetectorsError::InternalServiceFault(ref cause) => cause, + DescribeAnomalyDetectorsError::InvalidNextToken(ref cause) => cause, + DescribeAnomalyDetectorsError::InvalidParameterValue(ref cause) => cause, + } + } +} /// Errors returned by DisableAlarmActions #[derive(Debug, PartialEq)] pub enum DisableAlarmActionsError {} @@ -3737,6 +4304,77 @@ impl Error for ListTagsForResourceError { } } } +/// Errors returned by PutAnomalyDetector +#[derive(Debug, PartialEq)] +pub enum PutAnomalyDetectorError { + ///

Request processing has failed due to some unknown error, exception, or failure.

+ InternalServiceFault(String), + ///

The value of an input parameter is bad or out-of-range.

+ InvalidParameterValue(String), + ///

The operation exceeded one or more limits.

+ LimitExceeded(String), + ///

An input parameter that is required is missing.

+ MissingRequiredParameter(String), +} + +impl PutAnomalyDetectorError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "InternalServiceError" => { + return RusotoError::Service(PutAnomalyDetectorError::InternalServiceFault( + parsed_error.message, + )) + } + "InvalidParameterValue" => { + return RusotoError::Service( + PutAnomalyDetectorError::InvalidParameterValue(parsed_error.message), + ) + } + "LimitExceededException" => { + return RusotoError::Service(PutAnomalyDetectorError::LimitExceeded( + parsed_error.message, + )) + } + "MissingParameter" => { + return RusotoError::Service( + PutAnomalyDetectorError::MissingRequiredParameter(parsed_error.message), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for PutAnomalyDetectorError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutAnomalyDetectorError { + fn description(&self) -> &str { + match *self { + PutAnomalyDetectorError::InternalServiceFault(ref cause) => cause, + PutAnomalyDetectorError::InvalidParameterValue(ref cause) => cause, + PutAnomalyDetectorError::LimitExceeded(ref cause) => cause, + PutAnomalyDetectorError::MissingRequiredParameter(ref cause) => cause, + } + } +} /// Errors returned by PutDashboard #[derive(Debug, PartialEq)] pub enum PutDashboardError { @@ -4109,9 +4747,15 @@ impl Error for UntagResourceError { } /// Trait representing the capabilities of the CloudWatch API. CloudWatch clients implement this trait. pub trait CloudWatch { - ///

Deletes the specified alarms. In the event of an error, no alarms are deleted.

+ ///

Deletes the specified alarms. You can delete up to 50 alarms in one operation. In the event of an error, no alarms are deleted.

fn delete_alarms(&self, input: DeleteAlarmsInput) -> RusotoFuture<(), DeleteAlarmsError>; + ///

Deletes the specified anomaly detection model from your account.

+ fn delete_anomaly_detector( + &self, + input: DeleteAnomalyDetectorInput, + ) -> RusotoFuture<DeleteAnomalyDetectorOutput, DeleteAnomalyDetectorError>; + ///

Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

fn delete_dashboards( &self, @@ -4136,6 +4780,12 @@ pub trait CloudWatch { input: DescribeAlarmsForMetricInput, ) -> RusotoFuture; + ///

Lists the anomaly detection models that you have created in your account. You can list all models in your account or filter the results to only the models that are related to a certain namespace, metric name, or metric dimension.

+ fn describe_anomaly_detectors( + &self, + input: DescribeAnomalyDetectorsInput, + ) -> RusotoFuture<DescribeAnomalyDetectorsOutput, DescribeAnomalyDetectorsError>; + ///

Disables the actions for the specified alarms. When an alarm's actions are disabled, the alarm actions do not execute when the alarm state changes.

fn disable_alarm_actions( &self, @@ -4154,7 +4804,7 @@ pub trait CloudWatch { input: GetDashboardInput, ) -> RusotoFuture; - ///

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

+ ///

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

fn get_metric_data( &self, input: GetMetricDataInput, @@ -4190,13 +4840,19 @@ pub trait CloudWatch { input: ListTagsForResourceInput, ) -> RusotoFuture; - ///

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

There is no limit to the number of dashboards in your account. All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

+ ///

Creates an anomaly detection model for a CloudWatch metric. You can use the model to display a band of expected normal values when the metric is graphed.

For more information, see CloudWatch Anomaly Detection.

+ fn put_anomaly_detector( + &self, + input: PutAnomalyDetectorInput, + ) -> RusotoFuture<PutAnomalyDetectorOutput, PutAnomalyDetectorError>; + + ///

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.
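
The GetDashboard-as-template flow described above, sketched against the generated client (a hedged example, not from this diff; dashboard names are placeholders):

```rust
use rusoto_core::Region;
use rusoto_cloudwatch::{CloudWatch, CloudWatchClient, GetDashboardInput, PutDashboardInput};

fn main() {
    let client = CloudWatchClient::new(Region::UsEast1);

    // Fetch the JSON body of an existing dashboard to use as the template.
    let existing = client
        .get_dashboard(GetDashboardInput {
            dashboard_name: "source-dashboard".to_owned(),
        })
        .sync()
        .expect("GetDashboard failed");

    // Publish it under a new name; PutDashboard replaces the entire contents.
    let result = client
        .put_dashboard(PutDashboardInput {
            dashboard_name: "copy-of-source-dashboard".to_owned(),
            dashboard_body: existing.dashboard_body.unwrap_or_default(),
        })
        .sync();
    println!("{:?}", result);
}
```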

fn put_dashboard( &self, input: PutDashboardInput, ) -> RusotoFuture; - ///

Creates or updates an alarm and associates it with the specified metric or metric math expression.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • No specific permissions are needed for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

+ ///

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • No specific permissions are needed for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.
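
A hedged sketch of a basic threshold alarm via `put_metric_alarm` (all names, the instance ID, and the SNS topic ARN are placeholders; stop/terminate EC2 actions would additionally need the permissions listed above):

```rust
use rusoto_core::Region;
use rusoto_cloudwatch::{CloudWatch, CloudWatchClient, Dimension, PutMetricAlarmInput};

fn main() {
    let client = CloudWatchClient::new(Region::UsEast1);

    // Alarm when average CPU of one instance stays above 80% for two periods.
    let input = PutMetricAlarmInput {
        alarm_name: "example-high-cpu".to_owned(),
        namespace: Some("AWS/EC2".to_owned()),
        metric_name: Some("CPUUtilization".to_owned()),
        dimensions: Some(vec![Dimension {
            name: "InstanceId".to_owned(),
            value: "i-0123456789abcdef0".to_owned(),
        }]),
        statistic: Some("Average".to_owned()),
        period: Some(300),
        evaluation_periods: 2,
        threshold: Some(80.0),
        comparison_operator: "GreaterThanThreshold".to_owned(),
        // Placeholder SNS topic to notify when the alarm fires.
        alarm_actions: Some(vec![
            "arn:aws:sns:us-east-1:123456789012:example-topic".to_owned(),
        ]),
        ..Default::default()
    };

    match client.put_metric_alarm(input).sync() {
        // The new alarm starts in INSUFFICIENT_DATA until first evaluated.
        Ok(()) => println!("alarm created"),
        Err(e) => eprintln!("PutMetricAlarm failed: {}", e),
    }
}
```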

fn put_metric_alarm(&self, input: PutMetricAlarmInput) -> RusotoFuture<(), PutMetricAlarmError>; @@ -4230,10 +4886,7 @@ impl CloudWatchClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudWatchClient { - CloudWatchClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4247,15 +4900,19 @@ impl CloudWatchClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudWatchClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudWatchClient { + CloudWatchClient { client, region } } } impl CloudWatch for CloudWatchClient { - ///

Deletes the specified alarms. In the event of an error, no alarms are deleted.

+ ///

Deletes the specified alarms. You can delete up to 50 alarms in one operation. In the event of an error, no alarms are deleted.
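
For completeness, a hedged one-liner for the batch delete (the alarm name is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_cloudwatch::{CloudWatch, CloudWatchClient, DeleteAlarmsInput};

fn main() {
    let client = CloudWatchClient::new(Region::UsEast1);

    // Up to 50 names per call; if the call errors, no alarms are deleted.
    let input = DeleteAlarmsInput {
        alarm_names: vec!["example-high-cpu".to_owned()],
    };
    if let Err(e) = client.delete_alarms(input).sync() {
        eprintln!("DeleteAlarms failed: {}", e);
    }
}
```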

fn delete_alarms(&self, input: DeleteAlarmsInput) -> RusotoFuture<(), DeleteAlarmsError> { let mut request = SignedRequest::new("POST", "monitoring", &self.region, "/"); let mut params = Params::new(); @@ -4280,6 +4937,54 @@ impl CloudWatch for CloudWatchClient { }) } + ///

Deletes the specified anomaly detection model from your account.

+ fn delete_anomaly_detector( + &self, + input: DeleteAnomalyDetectorInput, + ) -> RusotoFuture<DeleteAnomalyDetectorOutput, DeleteAnomalyDetectorError> { + let mut request = SignedRequest::new("POST", "monitoring", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "DeleteAnomalyDetector"); + params.put("Version", "2010-08-01"); + DeleteAnomalyDetectorInputSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(DeleteAnomalyDetectorError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = DeleteAnomalyDetectorOutput::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + start_element(&actual_tag_name, &mut stack)?; + result = DeleteAnomalyDetectorOutputDeserializer::deserialize( + "DeleteAnomalyDetectorResult", + &mut stack, + )?; + skip_tree(&mut stack); + end_element(&actual_tag_name, &mut stack)?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

fn delete_dashboards( &self, @@ -4312,7 +5017,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4362,7 +5067,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4413,7 +5118,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4461,7 +5166,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4480,6 +5185,54 @@ impl CloudWatch for CloudWatchClient { }) } + ///

Lists the anomaly detection models that you have created in your account. You can list all models in your account or filter the results to only the models that are related to a certain namespace, metric name, or metric dimension.
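
A hedged sketch of the namespace filter described above; leaving all fields unset would list every model in the account:

```rust
use rusoto_core::Region;
use rusoto_cloudwatch::{CloudWatch, CloudWatchClient, DescribeAnomalyDetectorsInput};

fn main() {
    let client = CloudWatchClient::new(Region::UsEast1);

    // Restrict the listing to one namespace (placeholder value).
    let input = DescribeAnomalyDetectorsInput {
        namespace: Some("AWS/Lambda".to_owned()),
        ..Default::default()
    };

    match client.describe_anomaly_detectors(input).sync() {
        Ok(output) => println!("{:?}", output.anomaly_detectors),
        Err(e) => eprintln!("DescribeAnomalyDetectors failed: {}", e),
    }
}
```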

+ fn describe_anomaly_detectors( + &self, + input: DescribeAnomalyDetectorsInput, + ) -> RusotoFuture<DescribeAnomalyDetectorsOutput, DescribeAnomalyDetectorsError> { + let mut request = SignedRequest::new("POST", "monitoring", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "DescribeAnomalyDetectors"); + params.put("Version", "2010-08-01"); + DescribeAnomalyDetectorsInputSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeAnomalyDetectorsError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = DescribeAnomalyDetectorsOutput::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + start_element(&actual_tag_name, &mut stack)?; + result = DescribeAnomalyDetectorsOutputDeserializer::deserialize( + "DescribeAnomalyDetectorsResult", + &mut stack, + )?; + skip_tree(&mut stack); + end_element(&actual_tag_name, &mut stack)?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Disables the actions for the specified alarms. When an alarm's actions are disabled, the alarm actions do not execute when the alarm state changes.

fn disable_alarm_actions( &self, @@ -4567,7 +5320,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4586,7 +5339,7 @@ impl CloudWatch for CloudWatchClient { }) } - ///

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

+ ///

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

fn get_metric_data( &self, input: GetMetricDataInput, @@ -4618,7 +5371,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4668,7 +5421,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4718,7 +5471,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4769,7 +5522,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4820,7 +5573,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4870,7 +5623,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4889,7 +5642,58 @@ impl CloudWatch for CloudWatchClient { }) } - ///

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

There is no limit to the number of dashboards in your account. All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

+ ///

Creates an anomaly detection model for a CloudWatch metric. You can use the model to display a band of expected normal values when the metric is graphed.

For more information, see CloudWatch Anomaly Detection.

+ fn put_anomaly_detector( + &self, + input: PutAnomalyDetectorInput, + ) -> RusotoFuture<PutAnomalyDetectorOutput, PutAnomalyDetectorError> { + let mut request = SignedRequest::new("POST", "monitoring", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "PutAnomalyDetector"); + params.put("Version", "2010-08-01"); + PutAnomalyDetectorInputSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(PutAnomalyDetectorError::from_response(response))), + ); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = PutAnomalyDetectorOutput::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + start_element(&actual_tag_name, &mut stack)?; + result = PutAnomalyDetectorOutputDeserializer::deserialize( + "PutAnomalyDetectorResult", + &mut stack, + )?; + skip_tree(&mut stack); + end_element(&actual_tag_name, &mut stack)?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

fn put_dashboard( &self, input: PutDashboardInput, @@ -4921,7 +5725,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -4940,7 +5744,7 @@ impl CloudWatch for CloudWatchClient { }) } - ///

Creates or updates an alarm and associates it with the specified metric or metric math expression.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • No specific permissions are needed for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

+ ///

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • No specific permissions are needed for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

fn put_metric_alarm( &self, input: PutMetricAlarmInput, @@ -5050,7 +5854,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5101,7 +5905,7 @@ impl CloudWatch for CloudWatchClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/codebuild/Cargo.toml b/rusoto/services/codebuild/Cargo.toml index 1b916c4062d..17a34b918e0 100644 --- a/rusoto/services/codebuild/Cargo.toml +++ b/rusoto/services/codebuild/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_codebuild" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/codebuild/README.md b/rusoto/services/codebuild/README.md index 679e2a9cf20..ea0a45b6a36 100644 --- a/rusoto/services/codebuild/README.md +++ b/rusoto/services/codebuild/README.md @@ -23,9 +23,16 @@ To use `rusoto_codebuild` in your application, add it as a dependency in your `C ```toml [dependencies] -rusoto_codebuild = "0.40.0" +rusoto_codebuild = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/codebuild/src/custom/mod.rs b/rusoto/services/codebuild/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/codebuild/src/custom/mod.rs +++ b/rusoto/services/codebuild/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/codebuild/src/generated.rs b/rusoto/services/codebuild/src/generated.rs index 892f97da921..50ce773bf24 100644 --- a/rusoto/services/codebuild/src/generated.rs +++ b/rusoto/services/codebuild/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct BatchDeleteBuildsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteBuildsOutput { ///

The IDs of the builds that were successfully deleted.

#[serde(rename = "buildsDeleted")] @@ -52,7 +51,7 @@ pub struct BatchGetBuildsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetBuildsOutput { ///

Information about the requested builds.

#[serde(rename = "builds")] @@ -72,7 +71,7 @@ pub struct BatchGetProjectsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetProjectsOutput { ///

Information about the requested build projects.

#[serde(rename = "projects")] @@ -86,7 +85,7 @@ pub struct BatchGetProjectsOutput { ///

Information about a build.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Build { ///

The Amazon Resource Name (ARN) of the build.

#[serde(rename = "arn")] @@ -176,7 +175,7 @@ pub struct Build { #[serde(rename = "source")] #[serde(skip_serializing_if = "Option::is_none")] pub source: Option, - ///

Any version identifier for the version of the source code to be built.

+ ///

Any version identifier for the version of the source code to be built. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

#[serde(rename = "sourceVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub source_version: Option, @@ -196,7 +195,7 @@ pub struct Build { ///

Information about build output artifacts.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuildArtifacts { ///

An identifier for this artifact definition.

#[serde(rename = "artifactIdentifier")] @@ -226,7 +225,7 @@ pub struct BuildArtifacts { ///

Information about a build that could not be successfully deleted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuildNotDeleted { ///

The ID of the build that could not be successfully deleted.

#[serde(rename = "id")] @@ -240,7 +239,7 @@ pub struct BuildNotDeleted { ///

Information about a stage for a build.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuildPhase { ///

Additional information about a build phase, especially to help troubleshoot a failed build.

#[serde(rename = "contexts")] @@ -323,6 +322,10 @@ pub struct CreateProjectInput { #[serde(rename = "secondaryArtifacts")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_artifacts: Option>, + ///

An array of ProjectSourceVersion objects. If secondarySourceVersions is specified at the build level, then they take precedence over these secondarySourceVersions (at the project level).

+ #[serde(rename = "secondarySourceVersions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub secondary_source_versions: Option>, ///

An array of ProjectSource objects.

#[serde(rename = "secondarySources")] #[serde(skip_serializing_if = "Option::is_none")] @@ -333,6 +336,10 @@ pub struct CreateProjectInput { ///

Information about the build input source code for the build project.

#[serde(rename = "source")] pub source: ProjectSource, + ///

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
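
A hedged sketch of the precedence rule just described: a project can carry a default sourceVersion, and a per-build override wins. This uses the rusoto_codebuild shapes from this diff with placeholder names; for GitHub sources the version may be a commit ID, branch, tag, or a pr/25-style pull request:

```rust
use rusoto_codebuild::{CodeBuild, CodeBuildClient, StartBuildInput};
use rusoto_core::Region;

fn main() {
    let client = CodeBuildClient::new(Region::UsEast1);

    // The build-level sourceVersion overrides the project-level default.
    let input = StartBuildInput {
        project_name: "example-project".to_owned(),
        source_version: Some("pr/25".to_owned()),
        ..Default::default()
    };

    match client.start_build(input).sync() {
        Ok(output) => println!("started: {:?}", output.build.and_then(|b| b.id)),
        Err(e) => eprintln!("StartBuild failed: {}", e),
    }
}
```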

+ #[serde(rename = "sourceVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_version: Option, ///

A set of tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] @@ -348,7 +355,7 @@ pub struct CreateProjectInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProjectOutput { ///

Information about the build project that was created.

#[serde(rename = "project")] @@ -372,7 +379,7 @@ pub struct CreateWebhookInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWebhookOutput { ///

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

#[serde(rename = "webhook")] @@ -388,7 +395,7 @@ pub struct DeleteProjectInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProjectOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -399,7 +406,7 @@ pub struct DeleteSourceCredentialsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSourceCredentialsOutput { ///

The Amazon Resource Name (ARN) of the token.

#[serde(rename = "arn")] @@ -415,12 +422,12 @@ pub struct DeleteWebhookInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWebhookOutput {} ///

Information about a Docker image that is managed by AWS CodeBuild.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentImage { ///

The description of the Docker image.

#[serde(rename = "description")] @@ -438,7 +445,7 @@ pub struct EnvironmentImage { ///

A set of Docker images that are related by programming language and are managed by AWS CodeBuild.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentLanguage { ///

The list of Docker images that are related by the specified programming language.

#[serde(rename = "images")] @@ -452,7 +459,7 @@ pub struct EnvironmentLanguage { ///

A set of Docker images that are related by platform and are managed by AWS CodeBuild.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentPlatform { ///

The list of programming languages that are available for the specified platform.

#[serde(rename = "languages")] @@ -495,6 +502,10 @@ pub struct ImportSourceCredentialsInput { ///

The source provider used for this project.

#[serde(rename = "serverType")] pub server_type: String, + ///

Set to false to prevent overwriting the repository source credentials. Set to true to overwrite the repository source credentials. The default value is true.

+ #[serde(rename = "shouldOverwrite")] + #[serde(skip_serializing_if = "Option::is_none")] + pub should_overwrite: Option, ///

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.

#[serde(rename = "token")] pub token: String, @@ -505,7 +516,7 @@ pub struct ImportSourceCredentialsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportSourceCredentialsOutput { ///

The Amazon Resource Name (ARN) of the token.

#[serde(rename = "arn")] @@ -521,7 +532,7 @@ pub struct InvalidateProjectCacheInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InvalidateProjectCacheOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -540,7 +551,7 @@ pub struct ListBuildsForProjectInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBuildsForProjectOutput { ///

A list of build IDs for the specified build project, with each build ID representing a single build.

#[serde(rename = "ids")] @@ -565,7 +576,7 @@ pub struct ListBuildsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBuildsOutput { ///

A list of build IDs, with each build ID representing a single build.

#[serde(rename = "ids")] @@ -581,7 +592,7 @@ pub struct ListBuildsOutput { pub struct ListCuratedEnvironmentImagesInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCuratedEnvironmentImagesOutput { ///

Information about supported platforms for Docker images that are managed by AWS CodeBuild.

#[serde(rename = "platforms")] @@ -606,7 +617,7 @@ pub struct ListProjectsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProjectsOutput { ///

If there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token. To get the next batch of items in the list, call this operation again, adding the next token to the call.

#[serde(rename = "nextToken")] @@ -622,7 +633,7 @@ pub struct ListProjectsOutput { pub struct ListSourceCredentialsInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSourceCredentialsOutput { ///

A list of SourceCredentialsInfo objects. Each SourceCredentialsInfo object includes the authentication type, token ARN, and type of source provider for one set of credentials.

#[serde(rename = "sourceCredentialsInfos")] @@ -645,7 +656,7 @@ pub struct LogsConfig { ///

Information about build logs in Amazon CloudWatch Logs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogsLocation { ///

Information about Amazon CloudWatch Logs for a build project.

#[serde(rename = "cloudWatchLogs")] @@ -675,7 +686,7 @@ pub struct LogsLocation { ///

Describes a network interface.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkInterface { ///

The ID of the network interface.

#[serde(rename = "networkInterfaceId")] @@ -689,7 +700,7 @@ pub struct NetworkInterface { ///

Additional information about a build phase that has an error. You can use this information for troubleshooting.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PhaseContext { ///

An explanation of the build phase's context. This might include a command ID and an exit code.

#[serde(rename = "message")] @@ -703,7 +714,7 @@ pub struct PhaseContext { ///

Information about a build project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Project { ///

The Amazon Resource Name (ARN) of the build project.

#[serde(rename = "arn")] @@ -757,6 +768,10 @@ pub struct Project { #[serde(rename = "secondaryArtifacts")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_artifacts: Option>, + ///

An array of ProjectSourceVersion objects. If secondarySourceVersions is specified at the build level, then they take precedence over these secondarySourceVersions (at the project level).

+ #[serde(rename = "secondarySourceVersions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub secondary_source_versions: Option>, ///

An array of ProjectSource objects.

#[serde(rename = "secondarySources")] #[serde(skip_serializing_if = "Option::is_none")] @@ -769,6 +784,10 @@ pub struct Project { #[serde(rename = "source")] #[serde(skip_serializing_if = "Option::is_none")] pub source: Option, + ///

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

+ #[serde(rename = "sourceVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_version: Option, ///

The tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] @@ -829,7 +848,7 @@ pub struct ProjectArtifacts { ///

Information about the build badge for the build project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectBadge { ///

Set this to true to generate a publicly accessible URL for your project's build badge.

#[serde(rename = "badgeEnabled")] @@ -848,7 +867,7 @@ pub struct ProjectCache { #[serde(rename = "location")] #[serde(skip_serializing_if = "Option::is_none")] pub location: Option, - ///

If you use a LOCAL cache, the local cache mode. You can use one or more local cache modes at the same time.

  • LOCAL_SOURCE_CACHE mode caches Git metadata for primary and secondary sources. After the cache is created, subsequent builds pull only the change between commits. This mode is a good choice for projects with a clean working directory and a source that is a large Git repository. If you choose this option and your project does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the option is ignored.

  • LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode is a good choice for projects that build or pull large Docker images. It can prevent the performance issues caused by pulling large Docker images down from the network.

    • You can use a Docker layer cache in the Linux enviornment only.

    • The privileged flag must be set so that your project has the required Docker permissions.

    • You should consider the security implications before you use a Docker layer cache.

  • LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec file. This mode is a good choice if your build scenario is not suited to one of the other three local cache modes. If you use a custom cache:

    • Only directories can be specified for caching. You cannot specify individual files.

    • Symlinks are used to reference cached directories.

    • Cached directories are linked to your build before it downloads its project sources. Cached items are overridden if a source item has the same name. Directories are specified using cache paths in the buildspec file.

+ ///

If you use a LOCAL cache, the local cache mode. You can use one or more local cache modes at the same time.

  • LOCAL_SOURCE_CACHE mode caches Git metadata for primary and secondary sources. After the cache is created, subsequent builds pull only the change between commits. This mode is a good choice for projects with a clean working directory and a source that is a large Git repository. If you choose this option and your project does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the option is ignored.

  • LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode is a good choice for projects that build or pull large Docker images. It can prevent the performance issues caused by pulling large Docker images down from the network.

    • You can use a Docker layer cache in the Linux environment only.

    • The privileged flag must be set so that your project has the required Docker permissions.

    • You should consider the security implications before you use a Docker layer cache.

  • LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec file. This mode is a good choice if your build scenario is not suited to one of the other three local cache modes. If you use a custom cache:

    • Only directories can be specified for caching. You cannot specify individual files.

    • Symlinks are used to reference cached directories.

    • Cached directories are linked to your build before it downloads its project sources. Cached items are overridden if a source item has the same name. Directories are specified using cache paths in the buildspec file.
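
A hedged sketch of a ProjectCache combining two of the local modes just listed (the string values assume CodeBuild's LOCAL_* identifiers; as noted above, the Docker layer mode additionally requires privileged mode):

```rust
use rusoto_codebuild::ProjectCache;

fn main() {
    // A LOCAL cache with Git-metadata and Docker-layer caching enabled.
    let cache = ProjectCache {
        type_: "LOCAL".to_owned(),
        modes: Some(vec![
            "LOCAL_SOURCE_CACHE".to_owned(),
            "LOCAL_DOCKER_LAYER_CACHE".to_owned(),
        ]),
        ..Default::default() // `location` is unused for LOCAL caches
    };
    println!("{:?}", cache);
}
```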

#[serde(rename = "modes")] #[serde(skip_serializing_if = "Option::is_none")] pub modes: Option>, @@ -878,7 +897,7 @@ pub struct ProjectEnvironment { #[serde(rename = "imagePullCredentialsType")] #[serde(skip_serializing_if = "Option::is_none")] pub image_pull_credentials_type: Option, - ///

Enables running the Docker daemon inside a Docker container. Set to true only if the build project is be used to build Docker images, and the specified build environment image is not provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon fail. You must also start the Docker daemon so that builds can interact with it. One way to do this is to initialize the Docker daemon during the install phase of your build spec by running the following build commands. (Do not run these commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)

If the operating system's base image is Ubuntu Linux:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 sh -c "until docker info; do echo .; sleep 1; done"

If the operating system's base image is Alpine Linux, add the -t argument to timeout:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 -t sh -c "until docker info; do echo .; sleep 1; done"

+ ///

Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images. Otherwise, a build that attempts to interact with the Docker daemon fails.

You can initialize the Docker daemon during the install phase of your build by adding one of the following sets of commands to the install phase of your buildspec file:

If the operating system's base image is Ubuntu Linux:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&

- timeout 15 sh -c "until docker info; do echo .; sleep 1; done"

If the operating system's base image is Alpine Linux and the previous command does not work, add the -t argument to timeout:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&

- timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done"
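
And a hedged sketch of an environment configured for Docker builds per the note above (the image and compute type are placeholder values):

```rust
use rusoto_codebuild::ProjectEnvironment;

fn main() {
    // privileged_mode lets the build start its own Docker daemon, as in the
    // buildspec install-phase commands shown above.
    let environment = ProjectEnvironment {
        type_: "LINUX_CONTAINER".to_owned(),
        compute_type: "BUILD_GENERAL1_SMALL".to_owned(),
        image: "aws/codebuild/standard:2.0".to_owned(),
        privileged_mode: Some(true),
        ..Default::default()
    };
    println!("{:?}", environment);
}
```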

#[serde(rename = "privilegedMode")] #[serde(skip_serializing_if = "Option::is_none")] pub privileged_mode: Option, @@ -931,13 +950,13 @@ pub struct ProjectSource { pub type_: String, } -///

A source identifier and its corresponding version.

+///

A source identifier and its corresponding version.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProjectSourceVersion { ///

An identifier for a source in the build project.

#[serde(rename = "sourceIdentifier")] pub source_identifier: String, - ///

The source version for the corresponding source identifier. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

+ ///

The source version for the corresponding source identifier. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

#[serde(rename = "sourceVersion")] pub source_version: String, } @@ -983,7 +1002,7 @@ pub struct SourceAuth { ///

Information about the credentials for a GitHub, GitHub Enterprise, or Bitbucket repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SourceCredentialsInfo { ///

The Amazon Resource Name (ARN) of the token.

#[serde(rename = "arn")] @@ -1104,7 +1123,7 @@ pub struct StartBuildInput { #[serde(rename = "sourceTypeOverride")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type_override: Option, - ///

A version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

+ ///

A version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

#[serde(rename = "sourceVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub source_version: Option, @@ -1115,7 +1134,7 @@ pub struct StartBuildInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartBuildOutput { ///

Information about the build to be run.

#[serde(rename = "build")] @@ -1131,7 +1150,7 @@ pub struct StopBuildInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopBuildOutput { ///

Information about the build.

#[serde(rename = "build")] @@ -1193,6 +1212,10 @@ pub struct UpdateProjectInput { #[serde(rename = "secondaryArtifacts")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_artifacts: Option>, + ///

An array of ProjectSourceVersion objects. If secondarySourceVersions is specified at the build level, those versions take precedence over these secondarySourceVersions (at the project level).

+ #[serde(rename = "secondarySourceVersions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub secondary_source_versions: Option>, ///

An array of ProjectSource objects.

#[serde(rename = "secondarySources")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1205,6 +1228,10 @@ pub struct UpdateProjectInput { #[serde(rename = "source")] #[serde(skip_serializing_if = "Option::is_none")] pub source: Option, + ///

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
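
For contrast, the project-level default can be set through UpdateProject; a sketch with a hypothetical project name:

```rust
use rusoto_codebuild::{CodeBuild, CodeBuildClient, UpdateProjectInput};
use rusoto_core::Region;

fn main() {
    let client = CodeBuildClient::new(Region::UsEast1);
    // Set a project-level default; individual StartBuild calls can still
    // override it with their own sourceVersion.
    let input = UpdateProjectInput {
        name: "my-project".to_string(), // hypothetical
        source_version: Some("master".to_string()),
        ..Default::default()
    };
    if let Err(e) = client.update_project(input).sync() {
        eprintln!("update_project failed: {}", e);
    }
}
```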

+ #[serde(rename = "sourceVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_version: Option, ///

The replacement set of tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1220,7 +1247,7 @@ pub struct UpdateProjectInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProjectOutput { ///

Information about the build project that was changed.

#[serde(rename = "project")] @@ -1248,7 +1275,7 @@ pub struct UpdateWebhookInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateWebhookOutput { ///

Information about a repository's webhook that is associated with a project in AWS CodeBuild.

#[serde(rename = "webhook")] @@ -1275,7 +1302,7 @@ pub struct VpcConfig { ///

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Webhook { ///

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

#[serde(rename = "branchFilter")] @@ -1641,6 +1668,8 @@ pub enum ImportSourceCredentialsError { AccountLimitExceeded(String), ///

The input value that was provided is not valid.

InvalidInput(String), + ///

The specified AWS resource cannot be created, because an AWS resource with the same settings already exists.

+    ResourceAlreadyExists(String),
 }
 
 impl ImportSourceCredentialsError {
@@ -1657,6 +1686,11 @@ impl ImportSourceCredentialsError {
                     err.msg,
                 ))
             }
+            "ResourceAlreadyExistsException" => {
+                return RusotoError::Service(
+                    ImportSourceCredentialsError::ResourceAlreadyExists(err.msg),
+                )
+            }
             "ValidationException" => return RusotoError::Validation(err.msg),
             _ => {}
         }
@@ -1674,6 +1708,7 @@ impl Error for ImportSourceCredentialsError {
         match *self {
             ImportSourceCredentialsError::AccountLimitExceeded(ref cause) => cause,
             ImportSourceCredentialsError::InvalidInput(ref cause) => cause,
+            ImportSourceCredentialsError::ResourceAlreadyExists(ref cause) => cause,
         }
     }
 }
@@ -2168,10 +2203,7 @@ impl CodeBuildClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> CodeBuildClient {
-        CodeBuildClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -2185,10 +2217,14 @@
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        CodeBuildClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> CodeBuildClient {
+        CodeBuildClient { client, region }
     }
 }

diff --git a/rusoto/services/codecommit/Cargo.toml b/rusoto/services/codecommit/Cargo.toml
index 31d1cdb9650..f7333d7c72f 100644
--- a/rusoto/services/codecommit/Cargo.toml
+++ b/rusoto/services/codecommit/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_codecommit"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []

diff --git a/rusoto/services/codecommit/README.md b/rusoto/services/codecommit/README.md
index 5a6dc974888..82c350cd7b6 100644
--- a/rusoto/services/codecommit/README.md
+++ b/rusoto/services/codecommit/README.md
@@ -23,9 +23,16 @@ To use `rusoto_codecommit` in your application, add it as a dependency in your `
 
 ```toml
 [dependencies]
-rusoto_codecommit = "0.40.0"
+rusoto_codecommit = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
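
As a usage note on the CodeBuild changes above: `new_with_client` makes it possible to share one underlying `Client` across service clients, and the new `ResourceAlreadyExists` variant surfaces duplicate credential imports as a typed error. A minimal sketch (the token is hypothetical; assumes the rusoto 0.41 futures-0.1 API with `.sync()`):

```rust
use rusoto_codebuild::{
    CodeBuild, CodeBuildClient, ImportSourceCredentialsError, ImportSourceCredentialsInput,
};
use rusoto_core::{Client, Region, RusotoError};

fn main() {
    // Share one underlying Client (connection pool + credential provider)
    // instead of letting each service client build its own.
    let shared = Client::shared();
    let codebuild = CodeBuildClient::new_with_client(shared, Region::UsEast1);

    let input = ImportSourceCredentialsInput {
        server_type: "GITHUB".to_string(),
        auth_type: "PERSONAL_ACCESS_TOKEN".to_string(),
        token: "ghp-example-token".to_string(), // hypothetical
        ..Default::default()
    };
    match codebuild.import_source_credentials(input).sync() {
        Ok(out) => println!("imported credentials: {:?}", out.arn),
        Err(RusotoError::Service(ImportSourceCredentialsError::ResourceAlreadyExists(msg))) => {
            eprintln!("credentials already exist: {}", msg)
        }
        Err(e) => eprintln!("import failed: {}", e),
    }
}
```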
diff --git a/rusoto/services/codecommit/src/custom/mod.rs b/rusoto/services/codecommit/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/codecommit/src/custom/mod.rs
+++ b/rusoto/services/codecommit/src/custom/mod.rs
@@ -0,0 +1 @@
+

diff --git a/rusoto/services/codecommit/src/generated.rs b/rusoto/services/codecommit/src/generated.rs
index 0b2ce293f2f..654d21849ff 100644
--- a/rusoto/services/codecommit/src/generated.rs
+++ b/rusoto/services/codecommit/src/generated.rs
@@ -9,21 +9,142 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
+///

Information about errors in a BatchDescribeMergeConflicts operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CodeCommitBatchDescribeMergeConflictsError { + ///

The name of the exception.

+ #[serde(rename = "exceptionName")] + pub exception_name: String, + ///

The path to the file.

+ #[serde(rename = "filePath")] + pub file_path: String, + ///

The message provided by the exception.

+ #[serde(rename = "message")] + pub message: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct BatchDescribeMergeConflictsInput { + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The path of the target files used to describe the conflicts. If not specified, the default is all conflict files.

+ #[serde(rename = "filePaths")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_paths: Option>, + ///

The maximum number of files to include in the output.

+ #[serde(rename = "maxConflictFiles")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_conflict_files: Option, + ///

The maximum number of merge hunks to include in the output.

+ #[serde(rename = "maxMergeHunks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_merge_hunks: Option, + ///

The merge option or strategy you want to use to merge the code.

+ #[serde(rename = "mergeOption")] + pub merge_option: String, + ///

An enumeration token that, when provided in a request, returns the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The name of the repository that contains the merge conflicts you want to review.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct BatchDescribeMergeConflictsOutput { + ///

The commit ID of the merge base.

+ #[serde(rename = "baseCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base_commit_id: Option, + ///

A list of conflicts for each file, including the conflict metadata and the hunks of the differences between the files.

+ #[serde(rename = "conflicts")] + pub conflicts: Vec, + ///

The commit ID of the destination commit specifier that was used in the merge evaluation.

+ #[serde(rename = "destinationCommitId")] + pub destination_commit_id: String, + ///

A list of any errors returned while describing the merge conflicts for each file.

+ #[serde(rename = "errors")] + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option>, + ///

An enumeration token that can be used in a request to return the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The commit ID of the source commit specifier that was used in the merge evaluation.

+ #[serde(rename = "sourceCommitId")] + pub source_commit_id: String, +} + +///

Returns information about errors in a BatchGetCommits operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CodeCommitBatchGetCommitsError { + ///

A commit ID that either could not be found or was not in a valid format.

+ #[serde(rename = "commitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_id: Option, + ///

An error code that specifies whether the commit ID was not valid or not found.

+ #[serde(rename = "errorCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_code: Option, + ///

An error message that provides detail about why the commit ID either was not found or was not valid.

+ #[serde(rename = "errorMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct BatchGetCommitsInput { + ///

The full commit IDs of the commits to get information about.

You must supply the full SHAs of each commit. You cannot use shortened SHAs.
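
A minimal sketch of calling the new BatchGetCommits operation (the repository name and SHA placeholder are hypothetical):

```rust
use rusoto_codecommit::{BatchGetCommitsInput, CodeCommit, CodeCommitClient};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = BatchGetCommitsInput {
        repository_name: "my-repo".to_string(), // hypothetical
        // Full 40-character commit SHAs are required here; shortened SHAs
        // come back in `errors` rather than `commits`.
        commit_ids: vec!["<full-commit-sha>".to_string()],
    };
    match client.batch_get_commits(input).sync() {
        Ok(out) => {
            for commit in out.commits.unwrap_or_default() {
                println!("{:?}: {:?}", commit.commit_id, commit.message);
            }
        }
        Err(e) => eprintln!("batch_get_commits failed: {}", e),
    }
}
```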

+ #[serde(rename = "commitIds")] + pub commit_ids: Vec, + ///

The name of the repository that contains the commits.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct BatchGetCommitsOutput { + ///

An array of commit data type objects, each of which contains information about a specified commit.

+ #[serde(rename = "commits")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commits: Option>, + ///

Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA or that commit was not found in the specified repository, an error object with additional information is returned for that ID.

+ #[serde(rename = "errors")] + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option>, +} + ///

Represents the input of a batch get repositories operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchGetRepositoriesInput { @@ -34,7 +155,7 @@ pub struct BatchGetRepositoriesInput { ///

Represents the output of a batch get repositories operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetRepositoriesOutput { ///

A list of repositories returned by the batch get repositories operation.

#[serde(rename = "repositories")] @@ -48,7 +169,7 @@ pub struct BatchGetRepositoriesOutput { ///

Returns information about a specific Git blob object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BlobMetadata { ///

The full ID of the blob.

#[serde(rename = "blobId")] @@ -66,7 +187,7 @@ pub struct BlobMetadata { ///

Returns information about a branch.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BranchInfo { ///

The name of the branch.

#[serde(rename = "branchName")] @@ -80,7 +201,7 @@ pub struct BranchInfo { ///

Returns information about a specific comment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Comment { ///

The Amazon Resource Name (ARN) of the person who posted the comment.

#[serde(rename = "authorArn")] @@ -118,7 +239,7 @@ pub struct Comment { ///

Returns information about comments on the comparison between two commits.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommentsForComparedCommit { ///

The full blob ID of the commit used to establish the 'after' of the comparison.

#[serde(rename = "afterBlobId")] @@ -152,7 +273,7 @@ pub struct CommentsForComparedCommit { ///

Returns information about comments on a pull request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommentsForPullRequest { ///

The full blob ID of the file on which you want to comment on the source commit.

#[serde(rename = "afterBlobId")] @@ -190,7 +311,7 @@ pub struct CommentsForPullRequest { ///

Returns information about a specific commit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Commit { ///

Any additional data associated with the specified commit.

#[serde(rename = "additionalData")] @@ -222,6 +343,83 @@ pub struct Commit { pub tree_id: Option, } +///

Information about conflicts in a merge operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Conflict { + ///

Metadata about a conflict in a merge operation.

+ #[serde(rename = "conflictMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_metadata: Option, + ///

A list of hunks that contain the differences between files or lines causing the conflict.

+ #[serde(rename = "mergeHunks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merge_hunks: Option>, +} + +///

Information about the metadata for a conflict in a merge operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ConflictMetadata { + ///

A boolean value indicating whether there are conflicts in the content of a file.

+ #[serde(rename = "contentConflict")] + #[serde(skip_serializing_if = "Option::is_none")] + pub content_conflict: Option, + ///

A boolean value indicating whether there are conflicts in the file mode of a file.

+ #[serde(rename = "fileModeConflict")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_mode_conflict: Option, + ///

The file modes of the file in the source, destination, and base of the merge.

+ #[serde(rename = "fileModes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_modes: Option, + ///

The path of the file that contains conflicts.

+ #[serde(rename = "filePath")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_path: Option, + ///

The file sizes of the file in the source, destination, and base of the merge.

+ #[serde(rename = "fileSizes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_sizes: Option, + ///

A boolean value (true or false) indicating whether the file is binary or textual in the source, destination, and base of the merge.

+ #[serde(rename = "isBinaryFile")] + #[serde(skip_serializing_if = "Option::is_none")] + pub is_binary_file: Option, + ///

Whether an add, modify, or delete operation caused the conflict between the source and destination of the merge.

+ #[serde(rename = "mergeOperations")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merge_operations: Option, + ///

The number of conflicts, including both hunk conflicts and metadata conflicts.

+ #[serde(rename = "numberOfConflicts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_conflicts: Option, + ///

A boolean value (true or false) indicating whether there are conflicts between the branches in the object type of a file, folder, or submodule.

+ #[serde(rename = "objectTypeConflict")] + #[serde(skip_serializing_if = "Option::is_none")] + pub object_type_conflict: Option, + ///

Information about any object type conflicts in a merge operation.

+ #[serde(rename = "objectTypes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub object_types: Option, +} + +///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
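
A sketch of building such a conflict resolution, assuming the generated ReplaceContentEntry type and its KEEP_SOURCE replacement type; the file path is hypothetical:

```rust
use rusoto_codecommit::{ConflictResolution, ReplaceContentEntry};

fn main() {
    // Resolve a known conflict in README.md by keeping the source branch's
    // version; other entries could delete files or set file modes instead.
    let resolution = ConflictResolution {
        replace_contents: Some(vec![ReplaceContentEntry {
            file_path: "README.md".to_string(),
            replacement_type: "KEEP_SOURCE".to_string(),
            ..Default::default()
        }]),
        ..Default::default()
    };
    println!("{:?}", resolution);
}
```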

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ConflictResolution { + ///

Files that will be deleted as part of the merge conflict resolution.

+ #[serde(rename = "deleteFiles")] + #[serde(skip_serializing_if = "Option::is_none")] + pub delete_files: Option>, + ///

Files that will have content replaced as part of the merge conflict resolution.

+ #[serde(rename = "replaceContents")] + #[serde(skip_serializing_if = "Option::is_none")] + pub replace_contents: Option>, + ///

File modes that will be set as part of the merge conflict resolution.

+ #[serde(rename = "setFileModes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub set_file_modes: Option>, +} + ///

Represents the input of a create branch operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateBranchInput { @@ -257,7 +455,7 @@ pub struct CreateCommitInput { #[serde(rename = "email")] #[serde(skip_serializing_if = "Option::is_none")] pub email: Option, - ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders.

+ ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

#[serde(rename = "keepEmptyFolders")] #[serde(skip_serializing_if = "Option::is_none")] pub keep_empty_folders: Option, @@ -279,7 +477,7 @@ pub struct CreateCommitInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCommitOutput { ///

The full commit ID of the commit that contains your committed file changes.

#[serde(rename = "commitId")] @@ -322,7 +520,7 @@ pub struct CreatePullRequestInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePullRequestOutput { ///

Information about the newly created pull request.

#[serde(rename = "pullRequest")] @@ -347,7 +545,7 @@ pub struct CreateRepositoryInput { ///

Represents the output of a create repository operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRepositoryOutput { ///

Information about the newly created repository.

#[serde(rename = "repositoryMetadata")] @@ -355,6 +553,63 @@ pub struct CreateRepositoryOutput { pub repository_metadata: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateUnreferencedMergeCommitInput { + ///

The name of the author who created the unreferenced commit. This information will be used as both the author and committer for the commit.

+ #[serde(rename = "authorName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub author_name: Option, + ///

The commit message for the unreferenced commit.

+ #[serde(rename = "commitMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_message: Option, + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

+ #[serde(rename = "conflictResolution")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The email address for the person who created the unreferenced commit.

+ #[serde(rename = "email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

+ #[serde(rename = "keepEmptyFolders")] + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_empty_folders: Option, + ///

The merge option or strategy you want to use to merge the code.

+ #[serde(rename = "mergeOption")] + pub merge_option: String, + ///

The name of the repository where you want to create the unreferenced merge commit.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateUnreferencedMergeCommitOutput { + ///

The full commit ID of the commit that contains your merge results.

+ #[serde(rename = "commitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_id: Option, + ///

The full SHA-1 pointer of the tree information for the commit that contains the merge results.

+ #[serde(rename = "treeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tree_id: Option, +} + ///

Represents the input of a delete branch operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteBranchInput { @@ -368,7 +623,7 @@ pub struct DeleteBranchInput { ///

Represents the output of a delete branch operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBranchOutput { ///

Information about the branch deleted by the operation, including the branch name and the commit ID that was the tip of the branch.

#[serde(rename = "deletedBranch")] @@ -384,7 +639,7 @@ pub struct DeleteCommentContentInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCommentContentOutput { ///

Information about the comment you just deleted.

#[serde(rename = "comment")] @@ -433,7 +688,7 @@ pub struct DeleteFileInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFileOutput { ///

The blob ID removed from the tree as part of deleting the file.

#[serde(rename = "blobId")] @@ -459,7 +714,7 @@ pub struct DeleteRepositoryInput { ///

Represents the output of a delete repository operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRepositoryOutput { ///

The ID of the repository that was deleted.

#[serde(rename = "repositoryId")] @@ -467,6 +722,66 @@ pub struct DeleteRepositoryOutput { pub repository_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeMergeConflictsInput { + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The path of the target files used to describe the conflicts.

+ #[serde(rename = "filePath")] + pub file_path: String, + ///

The maximum number of merge hunks to include in the output.

+ #[serde(rename = "maxMergeHunks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_merge_hunks: Option, + ///

The merge option or strategy you want to use to merge the code.

+ #[serde(rename = "mergeOption")] + pub merge_option: String, + ///

An enumeration token that, when provided in a request, returns the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The name of the repository where you want to get information about a merge conflict.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeMergeConflictsOutput { + ///

The commit ID of the merge base.

+ #[serde(rename = "baseCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base_commit_id: Option, + ///

Contains metadata about the conflicts found in the merge.

+ #[serde(rename = "conflictMetadata")] + pub conflict_metadata: ConflictMetadata, + ///

The commit ID of the destination commit specifier that was used in the merge evaluation.

+ #[serde(rename = "destinationCommitId")] + pub destination_commit_id: String, + ///

A list of merge hunks of the differences between the files or lines.

+ #[serde(rename = "mergeHunks")] + pub merge_hunks: Vec, + ///

An enumeration token that can be used in a request to return the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The commit ID of the source commit specifier that was used in the merge evaluation.

+ #[serde(rename = "sourceCommitId")] + pub source_commit_id: String, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribePullRequestEventsInput { ///

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.

@@ -491,7 +806,7 @@ pub struct DescribePullRequestEventsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePullRequestEventsOutput { ///

An enumeration token that can be used in a request to return the next batch of the results.

#[serde(rename = "nextToken")] @@ -504,7 +819,7 @@ pub struct DescribePullRequestEventsOutput { ///

Returns information about a set of differences for a commit specifier.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Difference { ///

Information about an afterBlob data type object, including the ID, the file mode permission code, and the path.

#[serde(rename = "afterBlob")] @@ -522,7 +837,7 @@ pub struct Difference { ///

Returns information about a file in a repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct File { ///

The fully-qualified path to the file in the repository.

#[serde(rename = "absolutePath")] @@ -544,7 +859,7 @@ pub struct File { ///

A file that will be added, updated, or deleted as part of a commit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FileMetadata { ///

The full path to the file that will be added or updated, including the name of the file.

#[serde(rename = "absolutePath")] @@ -560,9 +875,45 @@ pub struct FileMetadata { pub file_mode: Option, } +///

Information about file modes in a merge or pull request.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct FileModes { + ///

The file mode of a file in the base of a merge or pull request.

+ #[serde(rename = "base")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + ///

The file mode of a file in the destination of a merge or pull request.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

The file mode of a file in the source of a merge or pull request.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + +///

Information about the size of files in a merge or pull request.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct FileSizes { + ///

The size of a file in the base of a merge or pull request.

+ #[serde(rename = "base")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + ///

The size of a file in the destination of a merge or pull request.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

The size of a file in the source of a merge or pull request.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + ///

Returns information about a folder in a repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Folder { ///

The fully-qualified path of the folder in the repository.

#[serde(rename = "absolutePath")] @@ -591,7 +942,7 @@ pub struct GetBlobInput { ///

Represents the output of a get blob operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBlobOutput { ///

The content of the blob, usually a file.

#[serde(rename = "content")] @@ -618,7 +969,7 @@ pub struct GetBranchInput { ///

Represents the output of a get branch operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBranchOutput { ///

The name of the branch.

#[serde(rename = "branch")] @@ -634,7 +985,7 @@ pub struct GetCommentInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCommentOutput { ///

The contents of the comment.

#[serde(rename = "comment")] @@ -665,7 +1016,7 @@ pub struct GetCommentsForComparedCommitInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCommentsForComparedCommitOutput { ///

A list of comment objects on the compared commit.

#[serde(rename = "commentsForComparedCommitData")] @@ -705,7 +1056,7 @@ pub struct GetCommentsForPullRequestInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCommentsForPullRequestOutput { ///

An array of comment objects on the pull request.

#[serde(rename = "commentsForPullRequestData")] @@ -730,7 +1081,7 @@ pub struct GetCommitInput { ///

Represents the output of a get commit operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCommitOutput { ///

A commit data type object that contains information about the specified commit.

#[serde(rename = "commit")] @@ -768,7 +1119,7 @@ pub struct GetDifferencesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDifferencesOutput { ///

An enumeration token that can be used in a request to return the next batch of the results.

#[serde(rename = "NextToken")] @@ -795,7 +1146,7 @@ pub struct GetFileInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFileOutput { ///

The blob ID of the object that represents the file content.

#[serde(rename = "blobId")] @@ -837,7 +1188,7 @@ pub struct GetFolderInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFolderOutput { ///

The full commit ID used as a reference for which version of the folder content is returned.

#[serde(rename = "commitId")] @@ -867,14 +1218,72 @@ pub struct GetFolderOutput { pub tree_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMergeCommitInput { + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The name of the repository that contains the merge commit about which you want to get information.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMergeCommitOutput { + ///

The commit ID of the merge base.

+ #[serde(rename = "baseCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base_commit_id: Option, + ///

The commit ID of the destination commit specifier that was used in the merge evaluation.

+ #[serde(rename = "destinationCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination_commit_id: Option, + ///

The commit ID for the merge commit created when the source branch was merged into the destination branch. If the fast-forward merge strategy was used, no merge commit exists.
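
A sketch of checking this with the new GetMergeCommit operation (repository and branch names are hypothetical):

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, GetMergeCommitInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = GetMergeCommitInput {
        repository_name: "my-repo".to_string(),             // hypothetical
        source_commit_specifier: "feature".to_string(),     // branch name
        destination_commit_specifier: "master".to_string(),
        ..Default::default()
    };
    match client.get_merge_commit(input).sync() {
        // merged_commit_id is None when the branches were fast-forwarded,
        // since no separate merge commit is created in that case.
        Ok(out) => println!("merge commit: {:?}", out.merged_commit_id),
        Err(e) => eprintln!("get_merge_commit failed: {}", e),
    }
}
```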

+ #[serde(rename = "mergedCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merged_commit_id: Option, + ///

The commit ID of the source commit specifier that was used in the merge evaluation.

+ #[serde(rename = "sourceCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_commit_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetMergeConflictsInput { + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

#[serde(rename = "destinationCommitSpecifier")] pub destination_commit_specifier: String, - ///

The merge option or strategy you want to use to merge the code. The only valid value is FAST_FORWARD_MERGE.

+ ///

The maximum number of files to include in the output.

+ #[serde(rename = "maxConflictFiles")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_conflict_files: Option, + ///

The merge option or strategy you want to use to merge the code.

#[serde(rename = "mergeOption")] pub merge_option: String, + ///

An enumeration token that, when provided in a request, returns the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, ///

The name of the repository where the pull request was created.

#[serde(rename = "repositoryName")] pub repository_name: String, @@ -884,14 +1293,63 @@ pub struct GetMergeConflictsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMergeConflictsOutput { + ///

The commit ID of the merge base.

+ #[serde(rename = "baseCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base_commit_id: Option, + ///

A list of metadata for any conflicting files. If the specified merge strategy is FAST_FORWARD_MERGE, this list will always be empty.

+ #[serde(rename = "conflictMetadataList")] + pub conflict_metadata_list: Vec, ///

The commit ID of the destination commit specifier that was used in the merge evaluation.

#[serde(rename = "destinationCommitId")] pub destination_commit_id: String, - ///

A Boolean value that indicates whether the code is mergable by the specified merge option.

+ ///

A Boolean value that indicates whether the code is mergeable by the specified merge option.
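
A sketch of testing mergeability ahead of time with GetMergeConflicts (names are hypothetical; THREE_WAY_MERGE is one of the documented merge options):

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, GetMergeConflictsInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = GetMergeConflictsInput {
        repository_name: "my-repo".to_string(), // hypothetical
        source_commit_specifier: "feature".to_string(),
        destination_commit_specifier: "master".to_string(),
        merge_option: "THREE_WAY_MERGE".to_string(),
        ..Default::default()
    };
    match client.get_merge_conflicts(input).sync() {
        Ok(out) if out.mergeable => println!("branches can be merged cleanly"),
        Ok(out) => println!("{} conflicting file(s)", out.conflict_metadata_list.len()),
        Err(e) => eprintln!("get_merge_conflicts failed: {}", e),
    }
}
```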

#[serde(rename = "mergeable")] pub mergeable: bool, + ///

An enumeration token that can be used in a request to return the next batch of the results.

+ #[serde(rename = "nextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The commit ID of the source commit specifier that was used in the merge evaluation.

+ #[serde(rename = "sourceCommitId")] + pub source_commit_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMergeOptionsInput { + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The name of the repository that contains the commits about which you want to get merge options.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMergeOptionsOutput { + ///

The commit ID of the merge base.

+ #[serde(rename = "baseCommitId")] + pub base_commit_id: String, + ///

The commit ID of the destination commit specifier that was used in the merge evaluation.

+ #[serde(rename = "destinationCommitId")] + pub destination_commit_id: String, + ///

The merge option or strategy used to merge the code.
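
A sketch of asking which strategies would succeed via the new GetMergeOptions operation (names are hypothetical):

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, GetMergeOptionsInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = GetMergeOptionsInput {
        repository_name: "my-repo".to_string(), // hypothetical
        source_commit_specifier: "feature".to_string(),
        destination_commit_specifier: "master".to_string(),
        ..Default::default()
    };
    match client.get_merge_options(input).sync() {
        // e.g. ["FAST_FORWARD_MERGE", "SQUASH_MERGE", "THREE_WAY_MERGE"]
        Ok(out) => println!("available merge options: {:?}", out.merge_options),
        Err(e) => eprintln!("get_merge_options failed: {}", e),
    }
}
```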

+ #[serde(rename = "mergeOptions")] + pub merge_options: Vec, ///

The commit ID of the source commit specifier that was used in the merge evaluation.

#[serde(rename = "sourceCommitId")] pub source_commit_id: String, @@ -905,7 +1363,7 @@ pub struct GetPullRequestInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPullRequestOutput { ///

Information about the specified pull request.

#[serde(rename = "pullRequest")] @@ -922,7 +1380,7 @@ pub struct GetRepositoryInput { ///

Represents the output of a get repository operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRepositoryOutput { ///

Information about the repository.

#[serde(rename = "repositoryMetadata")] @@ -940,7 +1398,7 @@ pub struct GetRepositoryTriggersInput { ///

Represents the output of a get repository triggers operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRepositoryTriggersOutput { ///

The system-generated unique ID for the trigger.

#[serde(rename = "configurationId")] @@ -952,6 +1410,24 @@ pub struct GetRepositoryTriggersOutput { pub triggers: Option>, } +///

Information about whether a file is binary or textual in a merge or pull request operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct IsBinaryFile { + ///

The binary or non-binary status of a file in the base of a merge or pull request.

+ #[serde(rename = "base")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + ///

The binary or non-binary status of a file in the destination of a merge or pull request.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

The binary or non-binary status of a file in the source of a merge or pull request.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + ///

Represents the input of a list branches operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListBranchesInput { @@ -966,7 +1442,7 @@ pub struct ListBranchesInput { ///

Represents the output of a list branches operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBranchesOutput { ///

The list of branch names.

#[serde(rename = "branches")] @@ -1002,7 +1478,7 @@ pub struct ListPullRequestsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPullRequestsOutput { ///

An enumeration token that, when provided in a request, returns the next batch of the results.

#[serde(rename = "nextToken")] @@ -1032,7 +1508,7 @@ pub struct ListRepositoriesInput { ///

Represents the output of a list repositories operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRepositoriesOutput { ///

An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

#[serde(rename = "nextToken")] @@ -1056,7 +1532,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceOutput { ///

An enumeration token that allows the operation to batch the next results of the operation.

#[serde(rename = "nextToken")] @@ -1085,18 +1561,226 @@ pub struct Location { pub relative_file_version: Option, } -///

Returns information about a merge or potential merge between a source reference and a destination reference in a pull request.

-#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] -pub struct MergeMetadata { - ///

A Boolean value indicating whether the merge has been made.

- #[serde(rename = "isMerged")] - #[serde(skip_serializing_if = "Option::is_none")] - pub is_merged: Option, - ///

The Amazon Resource Name (ARN) of the user who merged the branches.

- #[serde(rename = "mergedBy")] - #[serde(skip_serializing_if = "Option::is_none")] - pub merged_by: Option, +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct MergeBranchesByFastForwardInput { + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The name of the repository where you want to merge two branches.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, + ///

The branch where the merge will be applied.
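
A sketch of the new MergeBranchesByFastForward operation (repository and branch names are hypothetical):

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, MergeBranchesByFastForwardInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    // Fast-forward `master` to the tip of `feature`.
    let input = MergeBranchesByFastForwardInput {
        repository_name: "my-repo".to_string(),
        source_commit_specifier: "feature".to_string(),
        destination_commit_specifier: "master".to_string(),
        target_branch: Some("master".to_string()),
    };
    match client.merge_branches_by_fast_forward(input).sync() {
        Ok(out) => println!("merged as commit {:?}", out.commit_id),
        Err(e) => eprintln!("merge failed: {}", e),
    }
}
```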

+ #[serde(rename = "targetBranch")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_branch: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeBranchesByFastForwardOutput { + ///

The commit ID of the merge in the destination or target branch.

+ #[serde(rename = "commitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_id: Option, + ///

The tree ID of the merge in the destination or target branch.

+ #[serde(rename = "treeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tree_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct MergeBranchesBySquashInput { + ///

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

+ #[serde(rename = "authorName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub author_name: Option, + ///

The commit message for the merge.

+ #[serde(rename = "commitMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_message: Option, + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

+ #[serde(rename = "conflictResolution")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The email address of the person merging the branches. This information will be used in the commit information for the merge.

+ #[serde(rename = "email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

+ #[serde(rename = "keepEmptyFolders")] + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_empty_folders: Option, + ///

The name of the repository where you want to merge two branches.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, + ///

The branch where the merge will be applied.

+ #[serde(rename = "targetBranch")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_branch: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeBranchesBySquashOutput { + ///

The commit ID of the merge in the destination or target branch.

+ #[serde(rename = "commitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_id: Option, + ///

The tree ID of the merge in the destination or target branch.

+ #[serde(rename = "treeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tree_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct MergeBranchesByThreeWayInput { + ///

+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct MergeBranchesByThreeWayInput {
+    /// The name of the author who created the commit. This information will be used as both the author and committer for the commit.

+ #[serde(rename = "authorName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub author_name: Option, + ///

The commit message to include in the commit information for the merge.

+ #[serde(rename = "commitMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_message: Option, + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

+ #[serde(rename = "conflictResolution")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "destinationCommitSpecifier")] + pub destination_commit_specifier: String, + ///

The email address of the person merging the branches. This information will be used in the commit information for the merge.

+ #[serde(rename = "email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

+ #[serde(rename = "keepEmptyFolders")] + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_empty_folders: Option, + ///

The name of the repository where you want to merge two branches.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

+ #[serde(rename = "sourceCommitSpecifier")] + pub source_commit_specifier: String, + ///

The branch where the merge will be applied.

+ #[serde(rename = "targetBranch")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_branch: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeBranchesByThreeWayOutput { + ///

The commit ID of the merge in the destination or target branch.

+ #[serde(rename = "commitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_id: Option, + ///

The tree ID of the merge in the destination or target branch.

+ #[serde(rename = "treeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tree_id: Option, +} + +///

+
+/// Information about merge hunks in a merge or pull request operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeHunk { + ///

Information about the merge hunk in the base of a merge or pull request.

+ #[serde(rename = "base")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + ///

Information about the merge hunk in the destination of a merge or pull request.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

A Boolean value indicating whether a combination of hunks contains a conflict. Conflicts occur when the same file or the same lines in a file were modified in both the source and destination of a merge or pull request. Valid values include true, false, and null. This will be true when the hunk represents a conflict and one or more files contain a line conflict. File mode conflicts in a merge will not set this to be true.

+ #[serde(rename = "isConflict")] + #[serde(skip_serializing_if = "Option::is_none")] + pub is_conflict: Option, + ///

Information about the merge hunk in the source of a merge or pull request.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + +///

+
+/// Information about the details of a merge hunk that contains a conflict in a merge or pull request operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeHunkDetail { + ///

The end position of the hunk in the merge result.

+ #[serde(rename = "endLine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub end_line: Option, + ///

The base-64 encoded content of the hunk merged region that might or might not contain a conflict.

+ #[serde(rename = "hunkContent")] + #[serde(skip_serializing_if = "Option::is_none")] + pub hunk_content: Option, + ///

The start position of the hunk in the merge result.

+ #[serde(rename = "startLine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_line: Option, +} + +///

+
+/// Returns information about a merge or potential merge between a source reference and a destination reference in a pull request.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeMetadata { + ///

A Boolean value indicating whether the merge has been made.

+ #[serde(rename = "isMerged")] + #[serde(skip_serializing_if = "Option::is_none")] + pub is_merged: Option, + ///

The commit ID for the merge commit, if any.

+ #[serde(rename = "mergeCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merge_commit_id: Option, + ///

The merge strategy used in the merge.

+ #[serde(rename = "mergeOption")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merge_option: Option, + ///

The Amazon Resource Name (ARN) of the user who merged the branches.

+ #[serde(rename = "mergedBy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub merged_by: Option, +} + +///

+
+/// Information about the file operation conflicts in a merge operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergeOperations { + ///

The operation on a file in the destination of a merge or pull request.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

The operation on a file (add, modify, or delete) of a file in the source of a merge or pull request.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1114,7 +1798,7 @@ pub struct MergePullRequestByFastForwardInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MergePullRequestByFastForwardOutput { ///

Information about the specified pull request, including information about the merge.

#[serde(rename = "pullRequest")] @@ -1122,12 +1806,130 @@ pub struct MergePullRequestByFastForwardOutput { pub pull_request: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct MergePullRequestBySquashInput { + ///

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

+ #[serde(rename = "authorName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub author_name: Option, + ///

The commit message to include in the commit information for the merge.

+ #[serde(rename = "commitMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_message: Option, + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

+ #[serde(rename = "conflictResolution")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The email address of the person merging the branches. This information will be used in the commit information for the merge.

+ #[serde(rename = "email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

+ #[serde(rename = "keepEmptyFolders")] + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_empty_folders: Option, + ///

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

+ #[serde(rename = "pullRequestId")] + pub pull_request_id: String, + ///

The name of the repository where the pull request was created.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.

+ #[serde(rename = "sourceCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_commit_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergePullRequestBySquashOutput { + #[serde(rename = "pullRequest")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pull_request: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct MergePullRequestByThreeWayInput { + ///

+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct MergePullRequestByThreeWayInput {
+    /// The name of the author who created the commit. This information will be used as both the author and committer for the commit.

+ #[serde(rename = "authorName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub author_name: Option, + ///

The commit message to include in the commit information for the merge.

+ #[serde(rename = "commitMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_message: Option, + ///

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

+ #[serde(rename = "conflictDetailLevel")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_detail_level: Option, + ///

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

+ #[serde(rename = "conflictResolution")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution: Option, + ///

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

+ #[serde(rename = "conflictResolutionStrategy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub conflict_resolution_strategy: Option, + ///

The email address of the person merging the branches. This information will be used in the commit information for the merge.

+ #[serde(rename = "email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + ///

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

+ #[serde(rename = "keepEmptyFolders")] + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_empty_folders: Option, + ///

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

+ #[serde(rename = "pullRequestId")] + pub pull_request_id: String, + ///

The name of the repository where the pull request was created.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, + ///

The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.

+ #[serde(rename = "sourceCommitId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_commit_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MergePullRequestByThreeWayOutput { + #[serde(rename = "pullRequest")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pull_request: Option, +} + +///

+
+/// Information about the type of an object in a merge operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ObjectTypes { + ///

The type of the object in the base commit of the merge.

+ #[serde(rename = "base")] + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + ///

The type of the object in the destination branch.

+ #[serde(rename = "destination")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option, + ///

The type of the object in the source branch.

+ #[serde(rename = "source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PostCommentForComparedCommitInput { ///

To establish the directionality of the comparison, the full commit ID of the 'after' commit.

#[serde(rename = "afterCommitId")] pub after_commit_id: String, - ///

To establish the directionality of the comparison, the full commit ID of the 'before' commit.

+ ///

To establish the directionality of the comparison, the full commit ID of the 'before' commit.

This is required for commenting on any commit unless that commit is the initial commit.

#[serde(rename = "beforeCommitId")] #[serde(skip_serializing_if = "Option::is_none")] pub before_commit_id: Option, @@ -1148,7 +1950,7 @@ pub struct PostCommentForComparedCommitInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PostCommentForComparedCommitOutput { ///

In the directionality you established, the blob ID of the 'after' blob.

#[serde(rename = "afterBlobId")] @@ -1208,7 +2010,7 @@ pub struct PostCommentForPullRequestInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PostCommentForPullRequestOutput { ///

In the directionality of the pull request, the blob ID of the 'after' blob.

#[serde(rename = "afterBlobId")] @@ -1259,7 +2061,7 @@ pub struct PostCommentReplyInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PostCommentReplyOutput { ///

Information about the reply to a comment.

#[serde(rename = "comment")] @@ -1269,7 +2071,7 @@ pub struct PostCommentReplyOutput { ///

Returns information about a pull request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequest { ///

The Amazon Resource Name (ARN) of the user who created the pull request.

#[serde(rename = "authorArn")] @@ -1311,7 +2113,7 @@ pub struct PullRequest { ///

Metadata about the pull request that is used when comparing the pull request source with its destination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestCreatedEventMetadata { ///

The commit ID of the tip of the branch specified as the destination branch when the pull request was created.

#[serde(rename = "destinationCommitId")] @@ -1333,7 +2135,7 @@ pub struct PullRequestCreatedEventMetadata { ///

Returns information about a pull request event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestEvent { ///

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.

#[serde(rename = "actorArn")] @@ -1373,7 +2175,7 @@ pub struct PullRequestEvent { ///

Returns information about the change in the merge state for a pull request event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestMergedStateChangedEventMetadata { ///

The name of the branch that the pull request will be merged into.

#[serde(rename = "destinationReference")] @@ -1391,7 +2193,7 @@ pub struct PullRequestMergedStateChangedEventMetadata { ///

Information about an update to the source branch of a pull request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestSourceReferenceUpdatedEventMetadata { ///

The full commit ID of the commit in the source branch that was the tip of the branch at the time the pull request was updated.

#[serde(rename = "afterCommitId")] @@ -1413,7 +2215,7 @@ pub struct PullRequestSourceReferenceUpdatedEventMetadata { ///

Information about a change to the status of a pull request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestStatusChangedEventMetadata { ///

The changed status of the pull request.

#[serde(rename = "pullRequestStatus")] @@ -1423,7 +2225,7 @@ pub struct PullRequestStatusChangedEventMetadata { ///

Returns information about a pull request target.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PullRequestTarget { ///

The full commit ID that is the tip of the destination branch. This is the commit where the pull request was or will be merged.

#[serde(rename = "destinationCommit")] @@ -1522,7 +2324,7 @@ pub struct PutFileInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutFileOutput { ///

The ID of the blob, which is its SHA-1 pointer.

#[serde(rename = "blobId")] @@ -1548,7 +2350,7 @@ pub struct PutRepositoryTriggersInput { ///

Represents the output of a put repository triggers operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRepositoryTriggersOutput { ///

The system-generated unique ID for the create or update operation.

#[serde(rename = "configurationId")] @@ -1556,9 +2358,33 @@ pub struct PutRepositoryTriggersOutput { pub configuration_id: Option, } +///

Information about a replacement content entry in the conflict of a merge or pull request operation.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ReplaceContentEntry { + ///

The base-64 encoded content to use when the replacement type is USE_NEW_CONTENT.

+ #[serde(rename = "content")] + #[serde( + deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", + serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", + default + )] + #[serde(skip_serializing_if = "Option::is_none")] + pub content: Option, + ///

The file mode to apply during conflict resolution.

+ #[serde(rename = "fileMode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_mode: Option, + ///

The path of the conflicting file.

+ #[serde(rename = "filePath")] + pub file_path: String, + ///

The replacement type to use when determining how to resolve the conflict.

+ #[serde(rename = "replacementType")] + pub replacement_type: String, +} + ///

 /// Information about a repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RepositoryMetadata { ///

The Amazon Resource Name (ARN) of the repository.

#[serde(rename = "Arn")] @@ -1604,7 +2430,7 @@ pub struct RepositoryMetadata { ///

Information about a repository name and ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RepositoryNameIdPair { ///

The ID associated with the repository.

#[serde(rename = "repositoryId")] @@ -1619,7 +2445,7 @@ pub struct RepositoryNameIdPair { ///

Information about a trigger for a repository.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RepositoryTrigger { - ///

The branches that will be included in the trigger configuration. If you specify an empty array, the trigger will apply to all branches.

While no content is required in the array, you must include the array itself.

+ ///

The branches that will be included in the trigger configuration. If you specify an empty array, the trigger will apply to all branches.

Although no content is required in the array, you must include the array itself.

#[serde(rename = "branches")] #[serde(skip_serializing_if = "Option::is_none")] pub branches: Option>, @@ -1627,10 +2453,10 @@ pub struct RepositoryTrigger { #[serde(rename = "customData")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_data: Option, - ///

The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).

+ ///

The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon SNS.

#[serde(rename = "destinationArn")] pub destination_arn: String, - ///

The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS).

The valid value "all" cannot be used with any other values.

+ ///

The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.

The valid value "all" cannot be used with any other values.

#[serde(rename = "events")] pub events: Vec, ///

The name of the trigger.

@@ -1640,7 +2466,7 @@ pub struct RepositoryTrigger {
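A sketch of a trigger that fires on every event for all branches, per the field docs above; the SNS topic ARN is a placeholder:

```rust
use rusoto_codecommit::RepositoryTrigger;

fn main() {
    let trigger = RepositoryTrigger {
        name: "notify-on-push".to_owned(),
        // An empty branches array applies the trigger to all branches,
        // but the array itself must be present.
        branches: Some(vec![]),
        // "all" cannot be combined with any other event value.
        events: vec!["all".to_owned()],
        destination_arn: "arn:aws:sns:us-east-1:123456789012:my-topic".to_owned(), // placeholder
        custom_data: None,
    };
    println!("{:?}", trigger);
}
```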

 /// A trigger failed to run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RepositoryTriggerExecutionFailure { ///

Additional message information about the trigger that did not run.

#[serde(rename = "failureMessage")] @@ -1677,7 +2503,7 @@ pub struct SourceFileSpecifier { ///

Returns information about a submodule reference in a repository folder.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubModule { ///

The fully qualified path to the folder that contains the reference to the submodule.

#[serde(rename = "absolutePath")] @@ -1695,7 +2521,7 @@ pub struct SubModule { ///

Returns information about a symbolic link in a repository folder.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SymbolicLink { ///

The fully qualified path to the folder that contains the symbolic link.

#[serde(rename = "absolutePath")] @@ -1753,7 +2579,7 @@ pub struct TestRepositoryTriggersInput { ///

Represents the output of a test repository triggers operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestRepositoryTriggersOutput { ///

The list of triggers that were not able to be tested. This list provides the names of the triggers that could not be tested, separated by commas.

#[serde(rename = "failedExecutions")] @@ -1786,7 +2612,7 @@ pub struct UpdateCommentInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCommentOutput { ///

Information about the updated comment.

#[serde(rename = "comment")] @@ -1816,7 +2642,7 @@ pub struct UpdatePullRequestDescriptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePullRequestDescriptionOutput { ///

Information about the updated pull request.

#[serde(rename = "pullRequest")] @@ -1834,7 +2660,7 @@ pub struct UpdatePullRequestStatusInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePullRequestStatusOutput { ///

Information about the pull request.

#[serde(rename = "pullRequest")] @@ -1852,7 +2678,7 @@ pub struct UpdatePullRequestTitleInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePullRequestTitleOutput { ///

Information about the updated pull request.

#[serde(rename = "pullRequest")] @@ -1884,7 +2710,7 @@ pub struct UpdateRepositoryNameInput { ///

Information about the user who made a specified commit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserInfo { ///

The date when the specified commit was committed, in timestamp format with GMT offset.

#[serde(rename = "date")] @@ -1900,9 +2726,13 @@ pub struct UserInfo { pub name: Option, } -/// Errors returned by BatchGetRepositories +/// Errors returned by BatchDescribeMergeConflicts #[derive(Debug, PartialEq)] -pub enum BatchGetRepositoriesError { +pub enum BatchDescribeMergeConflictsError { + ///

The specified commit does not exist or no commit was specified, and the specified repository has no default branch.

+ CommitDoesNotExist(String), + ///

A commit was not specified.

+ CommitRequired(String), ///

An encryption integrity check failed.

EncryptionIntegrityChecksFailed(String), ///

An encryption key could not be accessed.

@@ -1913,26 +2743,328 @@ pub enum BatchGetRepositoriesError { EncryptionKeyNotFound(String), ///

The encryption key is not available.

EncryptionKeyUnavailable(String), + ///

The specified commit is not valid.

+ InvalidCommit(String), + ///

The specified conflict detail level is not valid.

+ InvalidConflictDetailLevel(String), + ///

The specified conflict resolution strategy is not valid.

+ InvalidConflictResolutionStrategy(String), + ///

The specified continuation token is not valid.

+ InvalidContinuationToken(String), + ///

The specified value for the number of conflict files to return is not valid.

+ InvalidMaxConflictFiles(String), + ///

The specified value for the number of merge hunks to return is not valid.

+ InvalidMaxMergeHunks(String), + ///

The specified merge option is not valid for this operation. Not all merge strategies are supported for all operations.

+ InvalidMergeOption(String), ///

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

InvalidRepositoryName(String), - ///

The maximum number of allowed repository names was exceeded. Currently, this number is 25.

- MaximumRepositoryNamesExceeded(String), - ///

A repository names object is required but was not specified.

- RepositoryNamesRequired(String), + ///

The number of files to load exceeds the allowed limit.

+ MaximumFileContentToLoadExceeded(String), + ///

The number of items to compare between the source or destination branches and the merge base exceeds the maximum allowed.

+ MaximumItemsToCompareExceeded(String), + ///

A merge option or strategy is required, and none was provided.

+ MergeOptionRequired(String), + ///

The specified repository does not exist.

+ RepositoryDoesNotExist(String), + ///

A repository name is required but was not specified.

+ RepositoryNameRequired(String), + ///

The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.

+ TipsDivergenceExceeded(String), } -impl BatchGetRepositoriesError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl BatchDescribeMergeConflictsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "CommitDoesNotExistException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::CommitDoesNotExist(err.msg), + ) + } + "CommitRequiredException" => { + return RusotoError::Service(BatchDescribeMergeConflictsError::CommitRequired( + err.msg, + )) + } "EncryptionIntegrityChecksFailedException" => { return RusotoError::Service( - BatchGetRepositoriesError::EncryptionIntegrityChecksFailed(err.msg), + BatchDescribeMergeConflictsError::EncryptionIntegrityChecksFailed(err.msg), ) } "EncryptionKeyAccessDeniedException" => { return RusotoError::Service( - BatchGetRepositoriesError::EncryptionKeyAccessDenied(err.msg), + BatchDescribeMergeConflictsError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::EncryptionKeyUnavailable(err.msg), + ) + } + "InvalidCommitException" => { + return RusotoError::Service(BatchDescribeMergeConflictsError::InvalidCommit( + err.msg, + )) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidConflictResolutionStrategy( + err.msg, + ), + ) + } + "InvalidContinuationTokenException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidContinuationToken(err.msg), + ) + } + "InvalidMaxConflictFilesException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidMaxConflictFiles(err.msg), + ) + } + "InvalidMaxMergeHunksException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidMaxMergeHunks(err.msg), + ) + } + "InvalidMergeOptionException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidMergeOption(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::InvalidRepositoryName(err.msg), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MergeOptionRequiredException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::MergeOptionRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + BatchDescribeMergeConflictsError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + 
BatchDescribeMergeConflictsError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for BatchDescribeMergeConflictsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for BatchDescribeMergeConflictsError { + fn description(&self) -> &str { + match *self { + BatchDescribeMergeConflictsError::CommitDoesNotExist(ref cause) => cause, + BatchDescribeMergeConflictsError::CommitRequired(ref cause) => cause, + BatchDescribeMergeConflictsError::EncryptionIntegrityChecksFailed(ref cause) => cause, + BatchDescribeMergeConflictsError::EncryptionKeyAccessDenied(ref cause) => cause, + BatchDescribeMergeConflictsError::EncryptionKeyDisabled(ref cause) => cause, + BatchDescribeMergeConflictsError::EncryptionKeyNotFound(ref cause) => cause, + BatchDescribeMergeConflictsError::EncryptionKeyUnavailable(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidCommit(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidConflictDetailLevel(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidConflictResolutionStrategy(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidContinuationToken(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidMaxConflictFiles(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidMaxMergeHunks(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidMergeOption(ref cause) => cause, + BatchDescribeMergeConflictsError::InvalidRepositoryName(ref cause) => cause, + BatchDescribeMergeConflictsError::MaximumFileContentToLoadExceeded(ref cause) => cause, + BatchDescribeMergeConflictsError::MaximumItemsToCompareExceeded(ref cause) => cause, + BatchDescribeMergeConflictsError::MergeOptionRequired(ref cause) => cause, + BatchDescribeMergeConflictsError::RepositoryDoesNotExist(ref cause) => cause, + BatchDescribeMergeConflictsError::RepositoryNameRequired(ref cause) => cause, + BatchDescribeMergeConflictsError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by BatchGetCommits +#[derive(Debug, PartialEq)] +pub enum BatchGetCommitsError { + ///

The maximum number of allowed commit IDs in a batch request is 100. Verify that your batch request contains no more than 100 commit IDs, and then try again.

+ CommitIdsLimitExceeded(String), + + CommitIdsListRequired(String), + ///

An encryption integrity check failed.

+ EncryptionIntegrityChecksFailed(String), + ///

An encryption key could not be accessed.

+ EncryptionKeyAccessDenied(String), + ///

The encryption key is disabled.

+ EncryptionKeyDisabled(String), + ///

No encryption key was found.

+ EncryptionKeyNotFound(String), + ///

The encryption key is not available.

+ EncryptionKeyUnavailable(String), + ///

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

+ InvalidRepositoryName(String), + ///

The specified repository does not exist.

+ RepositoryDoesNotExist(String), + ///

A repository name is required but was not specified.

+ RepositoryNameRequired(String), +} + +impl BatchGetCommitsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "CommitIdsLimitExceededException" => { + return RusotoError::Service(BatchGetCommitsError::CommitIdsLimitExceeded( + err.msg, + )) + } + "CommitIdsListRequiredException" => { + return RusotoError::Service(BatchGetCommitsError::CommitIdsListRequired( + err.msg, + )) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + BatchGetCommitsError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service(BatchGetCommitsError::EncryptionKeyAccessDenied( + err.msg, + )) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service(BatchGetCommitsError::EncryptionKeyDisabled( + err.msg, + )) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service(BatchGetCommitsError::EncryptionKeyNotFound( + err.msg, + )) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service(BatchGetCommitsError::EncryptionKeyUnavailable( + err.msg, + )) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service(BatchGetCommitsError::InvalidRepositoryName( + err.msg, + )) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service(BatchGetCommitsError::RepositoryDoesNotExist( + err.msg, + )) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service(BatchGetCommitsError::RepositoryNameRequired( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for BatchGetCommitsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for BatchGetCommitsError { + fn description(&self) -> &str { + match *self { + BatchGetCommitsError::CommitIdsLimitExceeded(ref cause) => cause, + BatchGetCommitsError::CommitIdsListRequired(ref cause) => cause, + BatchGetCommitsError::EncryptionIntegrityChecksFailed(ref cause) => cause, + BatchGetCommitsError::EncryptionKeyAccessDenied(ref cause) => cause, + BatchGetCommitsError::EncryptionKeyDisabled(ref cause) => cause, + BatchGetCommitsError::EncryptionKeyNotFound(ref cause) => cause, + BatchGetCommitsError::EncryptionKeyUnavailable(ref cause) => cause, + BatchGetCommitsError::InvalidRepositoryName(ref cause) => cause, + BatchGetCommitsError::RepositoryDoesNotExist(ref cause) => cause, + BatchGetCommitsError::RepositoryNameRequired(ref cause) => cause, + } + } +} +/// Errors returned by BatchGetRepositories +#[derive(Debug, PartialEq)] +pub enum BatchGetRepositoriesError { + ///

An encryption integrity check failed.

+ EncryptionIntegrityChecksFailed(String), + ///

An encryption key could not be accessed.

+ EncryptionKeyAccessDenied(String), + ///

The encryption key is disabled.

+ EncryptionKeyDisabled(String), + ///

No encryption key was found.

+ EncryptionKeyNotFound(String), + ///

The encryption key is not available.

+ EncryptionKeyUnavailable(String), + ///

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

+ InvalidRepositoryName(String), + ///

The maximum number of allowed repository names was exceeded. Currently, this number is 25.

+ MaximumRepositoryNamesExceeded(String), + ///

A repository names object is required but was not specified.

+ RepositoryNamesRequired(String), +} + +impl BatchGetRepositoriesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + BatchGetRepositoriesError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + BatchGetRepositoriesError::EncryptionKeyAccessDenied(err.msg), ) } "EncryptionKeyDisabledException" => { @@ -2133,7 +3265,7 @@ pub enum CreateCommitError { EncryptionKeyUnavailable(String), ///

The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file, or provide the file content directly.

FileContentAndSourceFileSpecified(String), - ///

The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

+ ///

The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

FileContentSizeLimitExceeded(String), ///

The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.

FileDoesNotExist(String), @@ -2145,7 +3277,7 @@ pub enum CreateCommitError { FileNameConflictsWithDirectoryName(String), ///

The commit cannot be created because a specified file path points to a submodule. Verify that the destination files have valid file paths that do not point to a submodule.

FilePathConflictsWithSubmodulePath(String), - ///

The commit cannot be created because at least one of the overall changes in the commit result in a folder contents exceeding the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

+ ///

The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

FolderContentSizeLimitExceeded(String), ///

The specified reference name is not valid.

InvalidBranchName(String), @@ -2163,7 +3295,7 @@ pub enum CreateCommitError { InvalidRepositoryName(String), ///

The number of specified files to change as part of this commit exceeds the maximum number of files that can be changed in a single commit. Consider using a Git client for these changes.

MaximumFileEntriesExceeded(String), - ///

The user name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.

+ ///

The user name is not valid because it has exceeded the character limit for author names.

NameLengthExceeded(String), ///

The commit cannot be created because no changes will be made to the repository as a result of this commit. A commit must contain at least one change.

NoChange(String), @@ -2739,13 +3871,17 @@ impl Error for CreateRepositoryError { } } } -/// Errors returned by DeleteBranch +/// Errors returned by CreateUnreferencedMergeCommit #[derive(Debug, PartialEq)] -pub enum DeleteBranchError { - ///

A branch name is required but was not specified.

- BranchNameRequired(String), - ///

The specified branch is the default branch for the repository, and cannot be deleted. To delete this branch, you must first set another branch as the default branch.

- DefaultBranchCannotBeDeleted(String), +pub enum CreateUnreferencedMergeCommitError { + ///

The specified commit does not exist or no commit was specified, and the specified repository has no default branch.

+ CommitDoesNotExist(String), + ///

The commit message is too long. Provide a shorter string.

+ CommitMessageLengthExceeded(String), + ///

A commit was not specified.

+ CommitRequired(String), + ///

The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.

+ ConcurrentReferenceUpdate(String), ///

An encryption integrity check failed.

EncryptionIntegrityChecksFailed(String), ///

An encryption key could not be accessed.

@@ -2756,37 +3892,366 @@ pub enum DeleteBranchError { EncryptionKeyNotFound(String), ///

The encryption key is not available.

EncryptionKeyUnavailable(String), - ///

The specified reference name is not valid.

- InvalidBranchName(String), + ///

The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

+ FileContentSizeLimitExceeded(String), + ///

The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.

+ FileModeRequired(String), + ///

The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

+ FolderContentSizeLimitExceeded(String), + ///

The specified commit is not valid.

+ InvalidCommit(String), + ///

The specified conflict detail level is not valid.

+ InvalidConflictDetailLevel(String), + ///

The specified conflict resolution list is not valid.

+ InvalidConflictResolution(String), + ///

The specified conflict resolution strategy is not valid.

+ InvalidConflictResolutionStrategy(String), + ///

The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.

+ InvalidEmail(String), + ///

The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.

+ InvalidFileMode(String), + ///

The specified merge option is not valid for this operation. Not all merge strategies are supported for all operations.

+ InvalidMergeOption(String), + ///

The specified path is not valid.

+ InvalidPath(String), + ///

Automerge was specified for resolving the conflict, but the replacement type is not valid or content is missing.

+ InvalidReplacementContent(String), + ///

Automerge was specified for resolving the conflict, but the specified replacement type is not valid.

+ InvalidReplacementType(String), ///

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

InvalidRepositoryName(String), + ///

The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.

+ ManualMergeRequired(String), + ///

The number of allowed conflict resolution entries was exceeded.

+ MaximumConflictResolutionEntriesExceeded(String), + ///

The number of files to load exceeds the allowed limit.

+ MaximumFileContentToLoadExceeded(String), + ///

The number of items to compare between the source or destination branches and the merge base exceeds the maximum allowed.

+ MaximumItemsToCompareExceeded(String), + ///

A merge option or stategy is required, and none was provided.

+ MergeOptionRequired(String), + ///

More than one conflict resolution entry exists for the conflict. A conflict can have only one conflict resolution entry.

+ MultipleConflictResolutionEntries(String), + ///

The user name is not valid because it has exceeded the character limit for author names.

+ NameLengthExceeded(String), + ///

The folderPath for a location cannot be null.

+ PathRequired(String), + ///

USE_NEW_CONTENT was specified but no replacement content has been provided.

+ ReplacementContentRequired(String), + ///

A replacement type is required.

+ ReplacementTypeRequired(String), ///

The specified repository does not exist.

RepositoryDoesNotExist(String), ///

A repository name is required but was not specified.

RepositoryNameRequired(String), + ///

The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.

+ TipsDivergenceExceeded(String), } -impl DeleteBranchError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateUnreferencedMergeCommitError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "BranchNameRequiredException" => { - return RusotoError::Service(DeleteBranchError::BranchNameRequired(err.msg)) + "CommitDoesNotExistException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::CommitDoesNotExist(err.msg), + ) } - "DefaultBranchCannotBeDeletedException" => { - return RusotoError::Service(DeleteBranchError::DefaultBranchCannotBeDeleted( - err.msg, - )) + "CommitMessageLengthExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::CommitMessageLengthExceeded(err.msg), + ) + } + "CommitRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::CommitRequired(err.msg), + ) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::ConcurrentReferenceUpdate(err.msg), + ) } "EncryptionIntegrityChecksFailedException" => { return RusotoError::Service( - DeleteBranchError::EncryptionIntegrityChecksFailed(err.msg), + CreateUnreferencedMergeCommitError::EncryptionIntegrityChecksFailed( + err.msg, + ), ) } "EncryptionKeyAccessDeniedException" => { - return RusotoError::Service(DeleteBranchError::EncryptionKeyAccessDenied( - err.msg, - )) + return RusotoError::Service( + CreateUnreferencedMergeCommitError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::EncryptionKeyUnavailable(err.msg), + ) + } + "FileContentSizeLimitExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::FileContentSizeLimitExceeded(err.msg), + ) + } + "FileModeRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::FileModeRequired(err.msg), + ) + } + "FolderContentSizeLimitExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::FolderContentSizeLimitExceeded(err.msg), + ) + } + "InvalidCommitException" => { + return RusotoError::Service(CreateUnreferencedMergeCommitError::InvalidCommit( + err.msg, + )) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidConflictResolution(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidConflictResolutionStrategy( + err.msg, + ), + ) + } + "InvalidEmailException" => { + return RusotoError::Service(CreateUnreferencedMergeCommitError::InvalidEmail( + err.msg, + )) + } + "InvalidFileModeException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidFileMode(err.msg), + ) + } + "InvalidMergeOptionException" => { + return RusotoError::Service( + 
CreateUnreferencedMergeCommitError::InvalidMergeOption(err.msg), + ) + } + "InvalidPathException" => { + return RusotoError::Service(CreateUnreferencedMergeCommitError::InvalidPath( + err.msg, + )) + } + "InvalidReplacementContentException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidReplacementContent(err.msg), + ) + } + "InvalidReplacementTypeException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidReplacementType(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::InvalidRepositoryName(err.msg), + ) + } + "ManualMergeRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::ManualMergeRequired(err.msg), + ) + } + "MaximumConflictResolutionEntriesExceededException" => return RusotoError::Service( + CreateUnreferencedMergeCommitError::MaximumConflictResolutionEntriesExceeded( + err.msg, + ), + ), + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::MaximumFileContentToLoadExceeded( + err.msg, + ), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MergeOptionRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::MergeOptionRequired(err.msg), + ) + } + "MultipleConflictResolutionEntriesException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::MultipleConflictResolutionEntries( + err.msg, + ), + ) + } + "NameLengthExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::NameLengthExceeded(err.msg), + ) + } + "PathRequiredException" => { + return RusotoError::Service(CreateUnreferencedMergeCommitError::PathRequired( + err.msg, + )) + } + "ReplacementContentRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::ReplacementContentRequired(err.msg), + ) + } + "ReplacementTypeRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::ReplacementTypeRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + CreateUnreferencedMergeCommitError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CreateUnreferencedMergeCommitError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CreateUnreferencedMergeCommitError { + fn description(&self) -> &str { + match *self { + CreateUnreferencedMergeCommitError::CommitDoesNotExist(ref cause) => cause, + CreateUnreferencedMergeCommitError::CommitMessageLengthExceeded(ref cause) => cause, + CreateUnreferencedMergeCommitError::CommitRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::ConcurrentReferenceUpdate(ref cause) => cause, + CreateUnreferencedMergeCommitError::EncryptionIntegrityChecksFailed(ref cause) => cause, + 
CreateUnreferencedMergeCommitError::EncryptionKeyAccessDenied(ref cause) => cause, + CreateUnreferencedMergeCommitError::EncryptionKeyDisabled(ref cause) => cause, + CreateUnreferencedMergeCommitError::EncryptionKeyNotFound(ref cause) => cause, + CreateUnreferencedMergeCommitError::EncryptionKeyUnavailable(ref cause) => cause, + CreateUnreferencedMergeCommitError::FileContentSizeLimitExceeded(ref cause) => cause, + CreateUnreferencedMergeCommitError::FileModeRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::FolderContentSizeLimitExceeded(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidCommit(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidConflictDetailLevel(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidConflictResolution(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidConflictResolutionStrategy(ref cause) => { + cause + } + CreateUnreferencedMergeCommitError::InvalidEmail(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidFileMode(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidMergeOption(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidPath(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidReplacementContent(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidReplacementType(ref cause) => cause, + CreateUnreferencedMergeCommitError::InvalidRepositoryName(ref cause) => cause, + CreateUnreferencedMergeCommitError::ManualMergeRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::MaximumConflictResolutionEntriesExceeded( + ref cause, + ) => cause, + CreateUnreferencedMergeCommitError::MaximumFileContentToLoadExceeded(ref cause) => { + cause + } + CreateUnreferencedMergeCommitError::MaximumItemsToCompareExceeded(ref cause) => cause, + CreateUnreferencedMergeCommitError::MergeOptionRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::MultipleConflictResolutionEntries(ref cause) => { + cause + } + CreateUnreferencedMergeCommitError::NameLengthExceeded(ref cause) => cause, + CreateUnreferencedMergeCommitError::PathRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::ReplacementContentRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::ReplacementTypeRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::RepositoryDoesNotExist(ref cause) => cause, + CreateUnreferencedMergeCommitError::RepositoryNameRequired(ref cause) => cause, + CreateUnreferencedMergeCommitError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by DeleteBranch +#[derive(Debug, PartialEq)] +pub enum DeleteBranchError { + ///
<p>A branch name is required but was not specified.</p>
+    BranchNameRequired(String),
+    /// <p>The specified branch is the default branch for the repository, and cannot be deleted. To delete this branch, you must first set another branch as the default branch.</p>
+    DefaultBranchCannotBeDeleted(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The specified reference name is not valid.</p>
+    InvalidBranchName(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+ RepositoryNameRequired(String), +} + +impl DeleteBranchError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "BranchNameRequiredException" => { + return RusotoError::Service(DeleteBranchError::BranchNameRequired(err.msg)) + } + "DefaultBranchCannotBeDeletedException" => { + return RusotoError::Service(DeleteBranchError::DefaultBranchCannotBeDeleted( + err.msg, + )) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + DeleteBranchError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service(DeleteBranchError::EncryptionKeyAccessDenied( + err.msg, + )) } "EncryptionKeyDisabledException" => { return RusotoError::Service(DeleteBranchError::EncryptionKeyDisabled(err.msg)) @@ -2930,7 +4395,7 @@ pub enum DeleteFileError { InvalidPath(String), ///
<p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
     InvalidRepositoryName(String),
-    /// <p>The user name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.</p>
+    /// <p>The user name is not valid because it has exceeded the character limit for author names.</p>
     NameLengthExceeded(String),
     /// <p>The parent commit ID is not valid because it does not exist. The specified parent commit ID does not exist in the specified branch of the repository.</p>
     ParentCommitDoesNotExist(String),
@@ -3145,11 +4610,13 @@ impl Error for DeleteRepositoryError {
         }
     }
 }
-/// Errors returned by DescribePullRequestEvents
+/// Errors returned by DescribeMergeConflicts
 #[derive(Debug, PartialEq)]
-pub enum DescribePullRequestEventsError {
-    ///
<p>The specified Amazon Resource Name (ARN) does not exist in the AWS account.</p>
-    ActorDoesNotExist(String),
+pub enum DescribeMergeConflictsError {
+    /// <p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+    CommitDoesNotExist(String),
+    /// <p>A commit was not specified.</p>
+    CommitRequired(String),
     /// <p>An encryption integrity check failed.</p>
     EncryptionIntegrityChecksFailed(String),
     /// <p>An encryption key could not be accessed.</p>
     EncryptionKeyAccessDenied(String),
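Every operation in this generated module follows the same pattern: an operation-specific error enum with one `String`-carrying variant per service exception, a `from_response` that maps the exception name parsed from the JSON error body onto that variant (falling back to `RusotoError::Validation` or `RusotoError::Unknown`), and `Display`/`Error` impls that surface the raw message. A minimal caller-side sketch, not part of this diff, assuming the crate's 0.40-era blocking API (`CodeCommit` trait, `CodeCommitClient`, `RusotoFuture::sync`):

```rust
// Hedged sketch: picking one service-error variant out of the RusotoError
// wrapper. DeleteBranchRequest/DeleteBranchError are the generated types
// shown above; Region and RusotoError come from rusoto_core.
use rusoto_codecommit::{CodeCommit, CodeCommitClient, DeleteBranchError, DeleteBranchRequest};
use rusoto_core::{Region, RusotoError};

fn delete_branch_if_allowed(repo: &str, branch: &str) {
    let client = CodeCommitClient::new(Region::UsEast1);
    let request = DeleteBranchRequest {
        branch_name: branch.to_owned(),
        repository_name: repo.to_owned(),
    };
    match client.delete_branch(request).sync() {
        Ok(_) => println!("deleted branch {}", branch),
        // Service exceptions arrive wrapped in RusotoError::Service, so a
        // specific variant is a nested pattern rather than a string compare:
        Err(RusotoError::Service(DeleteBranchError::DefaultBranchCannotBeDeleted(msg))) => {
            eprintln!("set another default branch first: {}", msg);
        }
        Err(other) => eprintln!("DeleteBranch failed: {}", other),
    }
}
```

Matching on the enum variants is preferable to inspecting the `Display` output, since `description()` just echoes the service message.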
@@ -3160,82 +4627,287 @@ pub enum DescribePullRequestEventsError {
     EncryptionKeyNotFound(String),
     ///
<p>The encryption key is not available.</p>
     EncryptionKeyUnavailable(String),
-    /// <p>The Amazon Resource Name (ARN) is not valid. Make sure that you have provided the full ARN for the user who initiated the change for the pull request, and then try again.</p>
-    InvalidActorArn(String),
+    /// <p>The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.</p>
+    FileDoesNotExist(String),
+    /// <p>The specified commit is not valid.</p>
+    InvalidCommit(String),
+    /// <p>The specified conflict detail level is not valid.</p>
+    InvalidConflictDetailLevel(String),
+    /// <p>The specified conflict resolution strategy is not valid.</p>
+    InvalidConflictResolutionStrategy(String),
     /// <p>The specified continuation token is not valid.</p>
     InvalidContinuationToken(String),
-    /// <p>The specified number of maximum results is not valid.</p>
-    InvalidMaxResults(String),
-    /// <p>The pull request event type is not valid.</p>
-    InvalidPullRequestEventType(String),
-    /// <p>The pull request ID is not valid. Make sure that you have provided the full ID and that the pull request is in the specified repository, and then try again.</p>
-    InvalidPullRequestId(String),
-    /// <p>The pull request ID could not be found. Make sure that you have specified the correct repository name and pull request ID, and then try again.</p>
-    PullRequestDoesNotExist(String),
-    /// <p>A pull request ID is required, but none was provided.</p>
-    PullRequestIdRequired(String),
+    /// <p>The specified value for the number of merge hunks to return is not valid.</p>
+    InvalidMaxMergeHunks(String),
+    /// <p>The specified merge option is not valid for this operation. Not all merge strategies are supported for all operations.</p>
+    InvalidMergeOption(String),
+    /// <p>The specified path is not valid.</p>
+    InvalidPath(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The number of files to load exceeds the allowed limit.</p>
+    MaximumFileContentToLoadExceeded(String),
+    /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+    MaximumItemsToCompareExceeded(String),
+    /// <p>A merge option or strategy is required, and none was provided.</p>
+    MergeOptionRequired(String),
+    /// <p>The folderPath for a location cannot be null.</p>
+    PathRequired(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+    RepositoryNameRequired(String),
+    /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), } -impl DescribePullRequestEventsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeMergeConflictsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "ActorDoesNotExistException" => { - return RusotoError::Service(DescribePullRequestEventsError::ActorDoesNotExist( + "CommitDoesNotExistException" => { + return RusotoError::Service(DescribeMergeConflictsError::CommitDoesNotExist( + err.msg, + )) + } + "CommitRequiredException" => { + return RusotoError::Service(DescribeMergeConflictsError::CommitRequired( err.msg, )) } "EncryptionIntegrityChecksFailedException" => { return RusotoError::Service( - DescribePullRequestEventsError::EncryptionIntegrityChecksFailed(err.msg), + DescribeMergeConflictsError::EncryptionIntegrityChecksFailed(err.msg), ) } "EncryptionKeyAccessDeniedException" => { return RusotoError::Service( - DescribePullRequestEventsError::EncryptionKeyAccessDenied(err.msg), + DescribeMergeConflictsError::EncryptionKeyAccessDenied(err.msg), ) } "EncryptionKeyDisabledException" => { return RusotoError::Service( - DescribePullRequestEventsError::EncryptionKeyDisabled(err.msg), + DescribeMergeConflictsError::EncryptionKeyDisabled(err.msg), ) } "EncryptionKeyNotFoundException" => { return RusotoError::Service( - DescribePullRequestEventsError::EncryptionKeyNotFound(err.msg), + DescribeMergeConflictsError::EncryptionKeyNotFound(err.msg), ) } "EncryptionKeyUnavailableException" => { return RusotoError::Service( - DescribePullRequestEventsError::EncryptionKeyUnavailable(err.msg), + DescribeMergeConflictsError::EncryptionKeyUnavailable(err.msg), ) } - "InvalidActorArnException" => { - return RusotoError::Service(DescribePullRequestEventsError::InvalidActorArn( + "FileDoesNotExistException" => { + return RusotoError::Service(DescribeMergeConflictsError::FileDoesNotExist( err.msg, )) } - "InvalidContinuationTokenException" => { - return RusotoError::Service( - DescribePullRequestEventsError::InvalidContinuationToken(err.msg), - ) - } - "InvalidMaxResultsException" => { - return RusotoError::Service(DescribePullRequestEventsError::InvalidMaxResults( + "InvalidCommitException" => { + return RusotoError::Service(DescribeMergeConflictsError::InvalidCommit( err.msg, )) } - "InvalidPullRequestEventTypeException" => { + "InvalidConflictDetailLevelException" => { return RusotoError::Service( - DescribePullRequestEventsError::InvalidPullRequestEventType(err.msg), + DescribeMergeConflictsError::InvalidConflictDetailLevel(err.msg), ) } - "InvalidPullRequestIdException" => { + "InvalidConflictResolutionStrategyException" => { return RusotoError::Service( - DescribePullRequestEventsError::InvalidPullRequestId(err.msg), + DescribeMergeConflictsError::InvalidConflictResolutionStrategy(err.msg), ) } - "PullRequestDoesNotExistException" => { + "InvalidContinuationTokenException" => { + return RusotoError::Service( + DescribeMergeConflictsError::InvalidContinuationToken(err.msg), + ) + } + "InvalidMaxMergeHunksException" => { + return RusotoError::Service(DescribeMergeConflictsError::InvalidMaxMergeHunks( + err.msg, + )) + } + "InvalidMergeOptionException" => { + return RusotoError::Service(DescribeMergeConflictsError::InvalidMergeOption( + err.msg, + )) + } + "InvalidPathException" => { + return RusotoError::Service(DescribeMergeConflictsError::InvalidPath(err.msg)) + } + "InvalidRepositoryNameException" => { + return 
RusotoError::Service( + DescribeMergeConflictsError::InvalidRepositoryName(err.msg), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + DescribeMergeConflictsError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + DescribeMergeConflictsError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MergeOptionRequiredException" => { + return RusotoError::Service(DescribeMergeConflictsError::MergeOptionRequired( + err.msg, + )) + } + "PathRequiredException" => { + return RusotoError::Service(DescribeMergeConflictsError::PathRequired(err.msg)) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + DescribeMergeConflictsError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + DescribeMergeConflictsError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + DescribeMergeConflictsError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeMergeConflictsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeMergeConflictsError { + fn description(&self) -> &str { + match *self { + DescribeMergeConflictsError::CommitDoesNotExist(ref cause) => cause, + DescribeMergeConflictsError::CommitRequired(ref cause) => cause, + DescribeMergeConflictsError::EncryptionIntegrityChecksFailed(ref cause) => cause, + DescribeMergeConflictsError::EncryptionKeyAccessDenied(ref cause) => cause, + DescribeMergeConflictsError::EncryptionKeyDisabled(ref cause) => cause, + DescribeMergeConflictsError::EncryptionKeyNotFound(ref cause) => cause, + DescribeMergeConflictsError::EncryptionKeyUnavailable(ref cause) => cause, + DescribeMergeConflictsError::FileDoesNotExist(ref cause) => cause, + DescribeMergeConflictsError::InvalidCommit(ref cause) => cause, + DescribeMergeConflictsError::InvalidConflictDetailLevel(ref cause) => cause, + DescribeMergeConflictsError::InvalidConflictResolutionStrategy(ref cause) => cause, + DescribeMergeConflictsError::InvalidContinuationToken(ref cause) => cause, + DescribeMergeConflictsError::InvalidMaxMergeHunks(ref cause) => cause, + DescribeMergeConflictsError::InvalidMergeOption(ref cause) => cause, + DescribeMergeConflictsError::InvalidPath(ref cause) => cause, + DescribeMergeConflictsError::InvalidRepositoryName(ref cause) => cause, + DescribeMergeConflictsError::MaximumFileContentToLoadExceeded(ref cause) => cause, + DescribeMergeConflictsError::MaximumItemsToCompareExceeded(ref cause) => cause, + DescribeMergeConflictsError::MergeOptionRequired(ref cause) => cause, + DescribeMergeConflictsError::PathRequired(ref cause) => cause, + DescribeMergeConflictsError::RepositoryDoesNotExist(ref cause) => cause, + DescribeMergeConflictsError::RepositoryNameRequired(ref cause) => cause, + DescribeMergeConflictsError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by DescribePullRequestEvents +#[derive(Debug, PartialEq)] +pub enum DescribePullRequestEventsError { + ///
<p>The specified Amazon Resource Name (ARN) does not exist in the AWS account.</p>
+    ActorDoesNotExist(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The Amazon Resource Name (ARN) is not valid. Make sure that you have provided the full ARN for the user who initiated the change for the pull request, and then try again.</p>
+    InvalidActorArn(String),
+    /// <p>The specified continuation token is not valid.</p>
+    InvalidContinuationToken(String),
+    /// <p>The specified number of maximum results is not valid.</p>
+    InvalidMaxResults(String),
+    /// <p>The pull request event type is not valid.</p>
+    InvalidPullRequestEventType(String),
+    /// <p>The pull request ID is not valid. Make sure that you have provided the full ID and that the pull request is in the specified repository, and then try again.</p>
+    InvalidPullRequestId(String),
+    /// <p>The pull request ID could not be found. Make sure that you have specified the correct repository name and pull request ID, and then try again.</p>
+    PullRequestDoesNotExist(String),
+    /// <p>A pull request ID is required, but none was provided.</p>
+ PullRequestIdRequired(String), +} + +impl DescribePullRequestEventsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ActorDoesNotExistException" => { + return RusotoError::Service(DescribePullRequestEventsError::ActorDoesNotExist( + err.msg, + )) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + DescribePullRequestEventsError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + DescribePullRequestEventsError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + DescribePullRequestEventsError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + DescribePullRequestEventsError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + DescribePullRequestEventsError::EncryptionKeyUnavailable(err.msg), + ) + } + "InvalidActorArnException" => { + return RusotoError::Service(DescribePullRequestEventsError::InvalidActorArn( + err.msg, + )) + } + "InvalidContinuationTokenException" => { + return RusotoError::Service( + DescribePullRequestEventsError::InvalidContinuationToken(err.msg), + ) + } + "InvalidMaxResultsException" => { + return RusotoError::Service(DescribePullRequestEventsError::InvalidMaxResults( + err.msg, + )) + } + "InvalidPullRequestEventTypeException" => { + return RusotoError::Service( + DescribePullRequestEventsError::InvalidPullRequestEventType(err.msg), + ) + } + "InvalidPullRequestIdException" => { + return RusotoError::Service( + DescribePullRequestEventsError::InvalidPullRequestId(err.msg), + ) + } + "PullRequestDoesNotExistException" => { return RusotoError::Service( DescribePullRequestEventsError::PullRequestDoesNotExist(err.msg), ) @@ -4279,6 +5951,131 @@ impl Error for GetFolderError { } } } +/// Errors returned by GetMergeCommit +#[derive(Debug, PartialEq)] +pub enum GetMergeCommitError { + ///
<p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+    CommitDoesNotExist(String),
+    /// <p>A commit was not specified.</p>
+    CommitRequired(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The specified commit is not valid.</p>
+    InvalidCommit(String),
+    /// <p>The specified conflict detail level is not valid.</p>
+    InvalidConflictDetailLevel(String),
+    /// <p>The specified conflict resolution strategy is not valid.</p>
+    InvalidConflictResolutionStrategy(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+ RepositoryNameRequired(String), +} + +impl GetMergeCommitError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "CommitDoesNotExistException" => { + return RusotoError::Service(GetMergeCommitError::CommitDoesNotExist(err.msg)) + } + "CommitRequiredException" => { + return RusotoError::Service(GetMergeCommitError::CommitRequired(err.msg)) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + GetMergeCommitError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service(GetMergeCommitError::EncryptionKeyAccessDenied( + err.msg, + )) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service(GetMergeCommitError::EncryptionKeyDisabled( + err.msg, + )) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service(GetMergeCommitError::EncryptionKeyNotFound( + err.msg, + )) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service(GetMergeCommitError::EncryptionKeyUnavailable( + err.msg, + )) + } + "InvalidCommitException" => { + return RusotoError::Service(GetMergeCommitError::InvalidCommit(err.msg)) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service(GetMergeCommitError::InvalidConflictDetailLevel( + err.msg, + )) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + GetMergeCommitError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service(GetMergeCommitError::InvalidRepositoryName( + err.msg, + )) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service(GetMergeCommitError::RepositoryDoesNotExist( + err.msg, + )) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service(GetMergeCommitError::RepositoryNameRequired( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetMergeCommitError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetMergeCommitError { + fn description(&self) -> &str { + match *self { + GetMergeCommitError::CommitDoesNotExist(ref cause) => cause, + GetMergeCommitError::CommitRequired(ref cause) => cause, + GetMergeCommitError::EncryptionIntegrityChecksFailed(ref cause) => cause, + GetMergeCommitError::EncryptionKeyAccessDenied(ref cause) => cause, + GetMergeCommitError::EncryptionKeyDisabled(ref cause) => cause, + GetMergeCommitError::EncryptionKeyNotFound(ref cause) => cause, + GetMergeCommitError::EncryptionKeyUnavailable(ref cause) => cause, + GetMergeCommitError::InvalidCommit(ref cause) => cause, + GetMergeCommitError::InvalidConflictDetailLevel(ref cause) => cause, + GetMergeCommitError::InvalidConflictResolutionStrategy(ref cause) => cause, + GetMergeCommitError::InvalidRepositoryName(ref cause) => cause, + GetMergeCommitError::RepositoryDoesNotExist(ref cause) => cause, + GetMergeCommitError::RepositoryNameRequired(ref cause) => cause, + } + } +} /// Errors returned by GetMergeConflicts #[derive(Debug, PartialEq)] pub enum GetMergeConflictsError { @@ -4298,14 +6095,26 @@ pub enum GetMergeConflictsError { EncryptionKeyUnavailable(String), ///
<p>The specified commit is not valid.</p>
     InvalidCommit(String),
+    /// <p>The specified conflict detail level is not valid.</p>
+    InvalidConflictDetailLevel(String),
+    /// <p>The specified conflict resolution strategy is not valid.</p>
+    InvalidConflictResolutionStrategy(String),
+    /// <p>The specified continuation token is not valid.</p>
+    InvalidContinuationToken(String),
     /// <p>The destination commit specifier is not valid. You must provide a valid branch name, tag, or full commit ID.</p>
     InvalidDestinationCommitSpecifier(String),
-    /// <p>The specified merge option is not valid. The only valid value is FAST_FORWARD_MERGE.</p>
+    /// <p>The specified value for the number of conflict files to return is not valid.</p>
+    InvalidMaxConflictFiles(String),
+    /// <p>The specified merge option is not valid for this operation. Not all merge strategies are supported for all operations.</p>
     InvalidMergeOption(String),
     /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
     InvalidRepositoryName(String),
     /// <p>The source commit specifier is not valid. You must provide a valid branch name, tag, or full commit ID.</p>
     InvalidSourceCommitSpecifier(String),
+    /// <p>The number of files to load exceeds the allowed limit.</p>
+    MaximumFileContentToLoadExceeded(String),
+    /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+    MaximumItemsToCompareExceeded(String),
     /// <p>A merge option or strategy is required, and none was provided.</p>
     MergeOptionRequired(String),
     /// <p>The specified repository does not exist.</p>
     RepositoryDoesNotExist(String),
@@ -4356,11 +6165,31 @@ impl GetMergeConflictsError { "InvalidCommitException" => { return RusotoError::Service(GetMergeConflictsError::InvalidCommit(err.msg)) } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + GetMergeConflictsError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + GetMergeConflictsError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidContinuationTokenException" => { + return RusotoError::Service(GetMergeConflictsError::InvalidContinuationToken( + err.msg, + )) + } "InvalidDestinationCommitSpecifierException" => { return RusotoError::Service( GetMergeConflictsError::InvalidDestinationCommitSpecifier(err.msg), ) } + "InvalidMaxConflictFilesException" => { + return RusotoError::Service(GetMergeConflictsError::InvalidMaxConflictFiles( + err.msg, + )) + } "InvalidMergeOptionException" => { return RusotoError::Service(GetMergeConflictsError::InvalidMergeOption( err.msg, @@ -4376,6 +6205,16 @@ impl GetMergeConflictsError { GetMergeConflictsError::InvalidSourceCommitSpecifier(err.msg), ) } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + GetMergeConflictsError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + GetMergeConflictsError::MaximumItemsToCompareExceeded(err.msg), + ) + } "MergeOptionRequiredException" => { return RusotoError::Service(GetMergeConflictsError::MergeOptionRequired( err.msg, @@ -4419,10 +6258,16 @@ impl Error for GetMergeConflictsError { GetMergeConflictsError::EncryptionKeyNotFound(ref cause) => cause, GetMergeConflictsError::EncryptionKeyUnavailable(ref cause) => cause, GetMergeConflictsError::InvalidCommit(ref cause) => cause, + GetMergeConflictsError::InvalidConflictDetailLevel(ref cause) => cause, + GetMergeConflictsError::InvalidConflictResolutionStrategy(ref cause) => cause, + GetMergeConflictsError::InvalidContinuationToken(ref cause) => cause, GetMergeConflictsError::InvalidDestinationCommitSpecifier(ref cause) => cause, + GetMergeConflictsError::InvalidMaxConflictFiles(ref cause) => cause, GetMergeConflictsError::InvalidMergeOption(ref cause) => cause, GetMergeConflictsError::InvalidRepositoryName(ref cause) => cause, GetMergeConflictsError::InvalidSourceCommitSpecifier(ref cause) => cause, + GetMergeConflictsError::MaximumFileContentToLoadExceeded(ref cause) => cause, + GetMergeConflictsError::MaximumItemsToCompareExceeded(ref cause) => cause, GetMergeConflictsError::MergeOptionRequired(ref cause) => cause, GetMergeConflictsError::RepositoryDoesNotExist(ref cause) => cause, GetMergeConflictsError::RepositoryNameRequired(ref cause) => cause, @@ -4430,7 +6275,156 @@ impl Error for GetMergeConflictsError { } } } -/// Errors returned by GetPullRequest +/// Errors returned by GetMergeOptions +#[derive(Debug, PartialEq)] +pub enum GetMergeOptionsError { + ///
<p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+    CommitDoesNotExist(String),
+    /// <p>A commit was not specified.</p>
+    CommitRequired(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The specified commit is not valid.</p>
+    InvalidCommit(String),
+    /// <p>The specified conflict detail level is not valid.</p>
+    InvalidConflictDetailLevel(String),
+    /// <p>The specified conflict resolution strategy is not valid.</p>
+    InvalidConflictResolutionStrategy(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The number of files to load exceeds the allowed limit.</p>
+    MaximumFileContentToLoadExceeded(String),
+    /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+    MaximumItemsToCompareExceeded(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+    RepositoryNameRequired(String),
+    /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), +} + +impl GetMergeOptionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "CommitDoesNotExistException" => { + return RusotoError::Service(GetMergeOptionsError::CommitDoesNotExist(err.msg)) + } + "CommitRequiredException" => { + return RusotoError::Service(GetMergeOptionsError::CommitRequired(err.msg)) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + GetMergeOptionsError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service(GetMergeOptionsError::EncryptionKeyAccessDenied( + err.msg, + )) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service(GetMergeOptionsError::EncryptionKeyDisabled( + err.msg, + )) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service(GetMergeOptionsError::EncryptionKeyNotFound( + err.msg, + )) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service(GetMergeOptionsError::EncryptionKeyUnavailable( + err.msg, + )) + } + "InvalidCommitException" => { + return RusotoError::Service(GetMergeOptionsError::InvalidCommit(err.msg)) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service(GetMergeOptionsError::InvalidConflictDetailLevel( + err.msg, + )) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + GetMergeOptionsError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service(GetMergeOptionsError::InvalidRepositoryName( + err.msg, + )) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + GetMergeOptionsError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + GetMergeOptionsError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service(GetMergeOptionsError::RepositoryDoesNotExist( + err.msg, + )) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service(GetMergeOptionsError::RepositoryNameRequired( + err.msg, + )) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service(GetMergeOptionsError::TipsDivergenceExceeded( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetMergeOptionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetMergeOptionsError { + fn description(&self) -> &str { + match *self { + GetMergeOptionsError::CommitDoesNotExist(ref cause) => cause, + GetMergeOptionsError::CommitRequired(ref cause) => cause, + GetMergeOptionsError::EncryptionIntegrityChecksFailed(ref cause) => cause, + GetMergeOptionsError::EncryptionKeyAccessDenied(ref cause) => cause, + GetMergeOptionsError::EncryptionKeyDisabled(ref cause) => cause, + GetMergeOptionsError::EncryptionKeyNotFound(ref cause) => cause, + GetMergeOptionsError::EncryptionKeyUnavailable(ref cause) => cause, + GetMergeOptionsError::InvalidCommit(ref cause) => cause, + GetMergeOptionsError::InvalidConflictDetailLevel(ref cause) => cause, + GetMergeOptionsError::InvalidConflictResolutionStrategy(ref cause) => cause, + GetMergeOptionsError::InvalidRepositoryName(ref cause) => cause, + 
GetMergeOptionsError::MaximumFileContentToLoadExceeded(ref cause) => cause, + GetMergeOptionsError::MaximumItemsToCompareExceeded(ref cause) => cause, + GetMergeOptionsError::RepositoryDoesNotExist(ref cause) => cause, + GetMergeOptionsError::RepositoryNameRequired(ref cause) => cause, + GetMergeOptionsError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by GetPullRequest #[derive(Debug, PartialEq)] pub enum GetPullRequestError { ///
<p>An encryption integrity check failed.</p>
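`GetMergeOptions` is the cheapest probe of the new merge surface added above: it reports which strategies would succeed for a source/destination pair before any merge is attempted. A sketch under the same assumptions as the earlier example; the request and output field names are assumed to follow the AWS GetMergeOptions shape this diff generates, with `merge_options` carrying values such as `FAST_FORWARD_MERGE`:

```rust
use rusoto_codecommit::{
    CodeCommit, CodeCommitClient, GetMergeOptionsError, GetMergeOptionsRequest,
};
use rusoto_core::{Region, RusotoError};

fn available_merge_strategies(repo: &str, source: &str, destination: &str) -> Vec<String> {
    let client = CodeCommitClient::new(Region::UsEast1);
    let request = GetMergeOptionsRequest {
        repository_name: repo.to_owned(),
        source_commit_specifier: source.to_owned(),
        destination_commit_specifier: destination.to_owned(),
        // Leave the optional conflict_detail_level and
        // conflict_resolution_strategy fields unset.
        ..Default::default()
    };
    match client.get_merge_options(request).sync() {
        Ok(output) => output.merge_options,
        // Branches that have drifted too far apart come back as a service
        // error, not as an empty option list:
        Err(RusotoError::Service(GetMergeOptionsError::TipsDivergenceExceeded(msg))) => {
            eprintln!("compare the branches locally with git diff: {}", msg);
            Vec::new()
        }
        Err(other) => {
            eprintln!("GetMergeOptions failed: {}", other);
            Vec::new()
        }
    }
}
```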
@@ -4978,21 +6972,1353 @@ impl ListTagsForResourceError { err.msg, )) } - "InvalidResourceArnException" => { - return RusotoError::Service(ListTagsForResourceError::InvalidResourceArn( + "InvalidResourceArnException" => { + return RusotoError::Service(ListTagsForResourceError::InvalidResourceArn( + err.msg, + )) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service(ListTagsForResourceError::RepositoryDoesNotExist( + err.msg, + )) + } + "ResourceArnRequiredException" => { + return RusotoError::Service(ListTagsForResourceError::ResourceArnRequired( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::InvalidRepositoryName(ref cause) => cause, + ListTagsForResourceError::InvalidResourceArn(ref cause) => cause, + ListTagsForResourceError::RepositoryDoesNotExist(ref cause) => cause, + ListTagsForResourceError::ResourceArnRequired(ref cause) => cause, + } + } +} +/// Errors returned by MergeBranchesByFastForward +#[derive(Debug, PartialEq)] +pub enum MergeBranchesByFastForwardError { + ///
<p>The specified branch does not exist.</p>
+    BranchDoesNotExist(String),
+    /// <p>The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.</p>
+    BranchNameIsTagName(String),
+    /// <p>A branch name is required but was not specified.</p>
+    BranchNameRequired(String),
+    /// <p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+    CommitDoesNotExist(String),
+    /// <p>A commit was not specified.</p>
+    CommitRequired(String),
+    /// <p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+    ConcurrentReferenceUpdate(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The specified reference name is not valid.</p>
+    InvalidBranchName(String),
+    /// <p>The specified commit is not valid.</p>
+    InvalidCommit(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The specified target branch is not valid.</p>
+    InvalidTargetBranch(String),
+    /// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
+    ManualMergeRequired(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+    RepositoryNameRequired(String),
+    /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), +} + +impl MergeBranchesByFastForwardError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "BranchDoesNotExistException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::BranchDoesNotExist(err.msg), + ) + } + "BranchNameIsTagNameException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::BranchNameIsTagName(err.msg), + ) + } + "BranchNameRequiredException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::BranchNameRequired(err.msg), + ) + } + "CommitDoesNotExistException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::CommitDoesNotExist(err.msg), + ) + } + "CommitRequiredException" => { + return RusotoError::Service(MergeBranchesByFastForwardError::CommitRequired( + err.msg, + )) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::ConcurrentReferenceUpdate(err.msg), + ) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::EncryptionKeyUnavailable(err.msg), + ) + } + "InvalidBranchNameException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::InvalidBranchName(err.msg), + ) + } + "InvalidCommitException" => { + return RusotoError::Service(MergeBranchesByFastForwardError::InvalidCommit( + err.msg, + )) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::InvalidRepositoryName(err.msg), + ) + } + "InvalidTargetBranchException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::InvalidTargetBranch(err.msg), + ) + } + "ManualMergeRequiredException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::ManualMergeRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + MergeBranchesByFastForwardError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for MergeBranchesByFastForwardError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for MergeBranchesByFastForwardError { + fn description(&self) -> &str { + match *self { + MergeBranchesByFastForwardError::BranchDoesNotExist(ref cause) => cause, + MergeBranchesByFastForwardError::BranchNameIsTagName(ref cause) => cause, + 
MergeBranchesByFastForwardError::BranchNameRequired(ref cause) => cause, + MergeBranchesByFastForwardError::CommitDoesNotExist(ref cause) => cause, + MergeBranchesByFastForwardError::CommitRequired(ref cause) => cause, + MergeBranchesByFastForwardError::ConcurrentReferenceUpdate(ref cause) => cause, + MergeBranchesByFastForwardError::EncryptionIntegrityChecksFailed(ref cause) => cause, + MergeBranchesByFastForwardError::EncryptionKeyAccessDenied(ref cause) => cause, + MergeBranchesByFastForwardError::EncryptionKeyDisabled(ref cause) => cause, + MergeBranchesByFastForwardError::EncryptionKeyNotFound(ref cause) => cause, + MergeBranchesByFastForwardError::EncryptionKeyUnavailable(ref cause) => cause, + MergeBranchesByFastForwardError::InvalidBranchName(ref cause) => cause, + MergeBranchesByFastForwardError::InvalidCommit(ref cause) => cause, + MergeBranchesByFastForwardError::InvalidRepositoryName(ref cause) => cause, + MergeBranchesByFastForwardError::InvalidTargetBranch(ref cause) => cause, + MergeBranchesByFastForwardError::ManualMergeRequired(ref cause) => cause, + MergeBranchesByFastForwardError::RepositoryDoesNotExist(ref cause) => cause, + MergeBranchesByFastForwardError::RepositoryNameRequired(ref cause) => cause, + MergeBranchesByFastForwardError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by MergeBranchesBySquash +#[derive(Debug, PartialEq)] +pub enum MergeBranchesBySquashError { + ///
<p>The specified branch does not exist.</p>
+    BranchDoesNotExist(String),
+    /// <p>The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.</p>
+    BranchNameIsTagName(String),
+    /// <p>A branch name is required but was not specified.</p>
+    BranchNameRequired(String),
+    /// <p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+    CommitDoesNotExist(String),
+    /// <p>The commit message is too long. Provide a shorter string.</p>
+    CommitMessageLengthExceeded(String),
+    /// <p>A commit was not specified.</p>
+    CommitRequired(String),
+    /// <p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+    ConcurrentReferenceUpdate(String),
+    /// <p>An encryption integrity check failed.</p>
+    EncryptionIntegrityChecksFailed(String),
+    /// <p>An encryption key could not be accessed.</p>
+    EncryptionKeyAccessDenied(String),
+    /// <p>The encryption key is disabled.</p>
+    EncryptionKeyDisabled(String),
+    /// <p>No encryption key was found.</p>
+    EncryptionKeyNotFound(String),
+    /// <p>The encryption key is not available.</p>
+    EncryptionKeyUnavailable(String),
+    /// <p>The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.</p>
+    FileContentSizeLimitExceeded(String),
+    /// <p>The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.</p>
+    FileModeRequired(String),
+    /// <p>The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.</p>
+    FolderContentSizeLimitExceeded(String),
+    /// <p>The specified reference name is not valid.</p>
+    InvalidBranchName(String),
+    /// <p>The specified commit is not valid.</p>
+    InvalidCommit(String),
+    /// <p>The specified conflict detail level is not valid.</p>
+    InvalidConflictDetailLevel(String),
+    /// <p>The specified conflict resolution list is not valid.</p>
+    InvalidConflictResolution(String),
+    /// <p>The specified conflict resolution strategy is not valid.</p>
+    InvalidConflictResolutionStrategy(String),
+    /// <p>The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.</p>
+    InvalidEmail(String),
+    /// <p>The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.</p>
+    InvalidFileMode(String),
+    /// <p>The specified path is not valid.</p>
+    InvalidPath(String),
+    /// <p>Automerge was specified for resolving the conflict, but the replacement type is not valid or content is missing.</p>
+    InvalidReplacementContent(String),
+    /// <p>Automerge was specified for resolving the conflict, but the specified replacement type is not valid.</p>
+    InvalidReplacementType(String),
+    /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+    InvalidRepositoryName(String),
+    /// <p>The specified target branch is not valid.</p>
+    InvalidTargetBranch(String),
+    /// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
+    ManualMergeRequired(String),
+    /// <p>The number of allowed conflict resolution entries was exceeded.</p>
+    MaximumConflictResolutionEntriesExceeded(String),
+    /// <p>The number of files to load exceeds the allowed limit.</p>
+    MaximumFileContentToLoadExceeded(String),
+    /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+    MaximumItemsToCompareExceeded(String),
+    /// <p>More than one conflict resolution entry exists for the conflict. A conflict can have only one conflict resolution entry.</p>
+    MultipleConflictResolutionEntries(String),
+    /// <p>The user name is not valid because it has exceeded the character limit for author names.</p>
+    NameLengthExceeded(String),
+    /// <p>The folderPath for a location cannot be null.</p>
+    PathRequired(String),
+    /// <p>USE_NEW_CONTENT was specified but no replacement content has been provided.</p>
+    ReplacementContentRequired(String),
+    /// <p>A replacement type is required.</p>
+    ReplacementTypeRequired(String),
+    /// <p>The specified repository does not exist.</p>
+    RepositoryDoesNotExist(String),
+    /// <p>A repository name is required but was not specified.</p>
+    RepositoryNameRequired(String),
+    /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), +} + +impl MergeBranchesBySquashError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "BranchDoesNotExistException" => { + return RusotoError::Service(MergeBranchesBySquashError::BranchDoesNotExist( + err.msg, + )) + } + "BranchNameIsTagNameException" => { + return RusotoError::Service(MergeBranchesBySquashError::BranchNameIsTagName( + err.msg, + )) + } + "BranchNameRequiredException" => { + return RusotoError::Service(MergeBranchesBySquashError::BranchNameRequired( + err.msg, + )) + } + "CommitDoesNotExistException" => { + return RusotoError::Service(MergeBranchesBySquashError::CommitDoesNotExist( + err.msg, + )) + } + "CommitMessageLengthExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::CommitMessageLengthExceeded(err.msg), + ) + } + "CommitRequiredException" => { + return RusotoError::Service(MergeBranchesBySquashError::CommitRequired( + err.msg, + )) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergeBranchesBySquashError::ConcurrentReferenceUpdate(err.msg), + ) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + MergeBranchesBySquashError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + MergeBranchesBySquashError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service(MergeBranchesBySquashError::EncryptionKeyDisabled( + err.msg, + )) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service(MergeBranchesBySquashError::EncryptionKeyNotFound( + err.msg, + )) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + MergeBranchesBySquashError::EncryptionKeyUnavailable(err.msg), + ) + } + "FileContentSizeLimitExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::FileContentSizeLimitExceeded(err.msg), + ) + } + "FileModeRequiredException" => { + return RusotoError::Service(MergeBranchesBySquashError::FileModeRequired( + err.msg, + )) + } + "FolderContentSizeLimitExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::FolderContentSizeLimitExceeded(err.msg), + ) + } + "InvalidBranchNameException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidBranchName( + err.msg, + )) + } + "InvalidCommitException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidCommit(err.msg)) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + MergeBranchesBySquashError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionException" => { + return RusotoError::Service( + MergeBranchesBySquashError::InvalidConflictResolution(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + MergeBranchesBySquashError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidEmailException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidEmail(err.msg)) + } + "InvalidFileModeException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidFileMode( + err.msg, + )) + } + "InvalidPathException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidPath(err.msg)) + } + "InvalidReplacementContentException" => { + return RusotoError::Service( + 
MergeBranchesBySquashError::InvalidReplacementContent(err.msg), + ) + } + "InvalidReplacementTypeException" => { + return RusotoError::Service( + MergeBranchesBySquashError::InvalidReplacementType(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidRepositoryName( + err.msg, + )) + } + "InvalidTargetBranchException" => { + return RusotoError::Service(MergeBranchesBySquashError::InvalidTargetBranch( + err.msg, + )) + } + "ManualMergeRequiredException" => { + return RusotoError::Service(MergeBranchesBySquashError::ManualMergeRequired( + err.msg, + )) + } + "MaximumConflictResolutionEntriesExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::MaximumConflictResolutionEntriesExceeded( + err.msg, + ), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MultipleConflictResolutionEntriesException" => { + return RusotoError::Service( + MergeBranchesBySquashError::MultipleConflictResolutionEntries(err.msg), + ) + } + "NameLengthExceededException" => { + return RusotoError::Service(MergeBranchesBySquashError::NameLengthExceeded( + err.msg, + )) + } + "PathRequiredException" => { + return RusotoError::Service(MergeBranchesBySquashError::PathRequired(err.msg)) + } + "ReplacementContentRequiredException" => { + return RusotoError::Service( + MergeBranchesBySquashError::ReplacementContentRequired(err.msg), + ) + } + "ReplacementTypeRequiredException" => { + return RusotoError::Service( + MergeBranchesBySquashError::ReplacementTypeRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + MergeBranchesBySquashError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + MergeBranchesBySquashError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + MergeBranchesBySquashError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for MergeBranchesBySquashError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for MergeBranchesBySquashError { + fn description(&self) -> &str { + match *self { + MergeBranchesBySquashError::BranchDoesNotExist(ref cause) => cause, + MergeBranchesBySquashError::BranchNameIsTagName(ref cause) => cause, + MergeBranchesBySquashError::BranchNameRequired(ref cause) => cause, + MergeBranchesBySquashError::CommitDoesNotExist(ref cause) => cause, + MergeBranchesBySquashError::CommitMessageLengthExceeded(ref cause) => cause, + MergeBranchesBySquashError::CommitRequired(ref cause) => cause, + MergeBranchesBySquashError::ConcurrentReferenceUpdate(ref cause) => cause, + MergeBranchesBySquashError::EncryptionIntegrityChecksFailed(ref cause) => cause, + MergeBranchesBySquashError::EncryptionKeyAccessDenied(ref cause) => cause, + MergeBranchesBySquashError::EncryptionKeyDisabled(ref cause) => cause, + MergeBranchesBySquashError::EncryptionKeyNotFound(ref cause) => cause, + MergeBranchesBySquashError::EncryptionKeyUnavailable(ref cause) => cause, + 
MergeBranchesBySquashError::FileContentSizeLimitExceeded(ref cause) => cause, + MergeBranchesBySquashError::FileModeRequired(ref cause) => cause, + MergeBranchesBySquashError::FolderContentSizeLimitExceeded(ref cause) => cause, + MergeBranchesBySquashError::InvalidBranchName(ref cause) => cause, + MergeBranchesBySquashError::InvalidCommit(ref cause) => cause, + MergeBranchesBySquashError::InvalidConflictDetailLevel(ref cause) => cause, + MergeBranchesBySquashError::InvalidConflictResolution(ref cause) => cause, + MergeBranchesBySquashError::InvalidConflictResolutionStrategy(ref cause) => cause, + MergeBranchesBySquashError::InvalidEmail(ref cause) => cause, + MergeBranchesBySquashError::InvalidFileMode(ref cause) => cause, + MergeBranchesBySquashError::InvalidPath(ref cause) => cause, + MergeBranchesBySquashError::InvalidReplacementContent(ref cause) => cause, + MergeBranchesBySquashError::InvalidReplacementType(ref cause) => cause, + MergeBranchesBySquashError::InvalidRepositoryName(ref cause) => cause, + MergeBranchesBySquashError::InvalidTargetBranch(ref cause) => cause, + MergeBranchesBySquashError::ManualMergeRequired(ref cause) => cause, + MergeBranchesBySquashError::MaximumConflictResolutionEntriesExceeded(ref cause) => { + cause + } + MergeBranchesBySquashError::MaximumFileContentToLoadExceeded(ref cause) => cause, + MergeBranchesBySquashError::MaximumItemsToCompareExceeded(ref cause) => cause, + MergeBranchesBySquashError::MultipleConflictResolutionEntries(ref cause) => cause, + MergeBranchesBySquashError::NameLengthExceeded(ref cause) => cause, + MergeBranchesBySquashError::PathRequired(ref cause) => cause, + MergeBranchesBySquashError::ReplacementContentRequired(ref cause) => cause, + MergeBranchesBySquashError::ReplacementTypeRequired(ref cause) => cause, + MergeBranchesBySquashError::RepositoryDoesNotExist(ref cause) => cause, + MergeBranchesBySquashError::RepositoryNameRequired(ref cause) => cause, + MergeBranchesBySquashError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by MergeBranchesByThreeWay +#[derive(Debug, PartialEq)] +pub enum MergeBranchesByThreeWayError { + ///
<p>The specified branch does not exist.</p>
+ BranchDoesNotExist(String),
+ /// <p>The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.</p>
+ BranchNameIsTagName(String),
+ /// <p>A branch name is required but was not specified.</p>
+ BranchNameRequired(String),
+ /// <p>The specified commit does not exist or no commit was specified, and the specified repository has no default branch.</p>
+ CommitDoesNotExist(String),
+ /// <p>The commit message is too long. Provide a shorter string.</p>
+ CommitMessageLengthExceeded(String),
+ /// <p>A commit was not specified.</p>
+ CommitRequired(String),
+ /// <p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+ ConcurrentReferenceUpdate(String),
+ /// <p>An encryption integrity check failed.</p>
+ EncryptionIntegrityChecksFailed(String),
+ /// <p>An encryption key could not be accessed.</p>
+ EncryptionKeyAccessDenied(String),
+ /// <p>The encryption key is disabled.</p>
+ EncryptionKeyDisabled(String),
+ /// <p>No encryption key was found.</p>
+ EncryptionKeyNotFound(String),
+ /// <p>The encryption key is not available.</p>
+ EncryptionKeyUnavailable(String),
+ /// <p>The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.</p>
+ FileContentSizeLimitExceeded(String),
+ /// <p>The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.</p>
+ FileModeRequired(String),
+ /// <p>The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.</p>
+ FolderContentSizeLimitExceeded(String),
+ /// <p>The specified reference name is not valid.</p>
+ InvalidBranchName(String),
+ /// <p>The specified commit is not valid.</p>
+ InvalidCommit(String),
+ /// <p>The specified conflict detail level is not valid.</p>
+ InvalidConflictDetailLevel(String),
+ /// <p>The specified conflict resolution list is not valid.</p>
+ InvalidConflictResolution(String),
+ /// <p>The specified conflict resolution strategy is not valid.</p>
+ InvalidConflictResolutionStrategy(String),
+ /// <p>The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.</p>
+ InvalidEmail(String),
+ /// <p>The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.</p>
+ InvalidFileMode(String),
+ /// <p>The specified path is not valid.</p>
+ InvalidPath(String),
+ /// <p>Automerge was specified for resolving the conflict, but the replacement type is not valid or content is missing.</p>
+ InvalidReplacementContent(String),
+ /// <p>Automerge was specified for resolving the conflict, but the specified replacement type is not valid.</p>
+ InvalidReplacementType(String),
+ /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+ InvalidRepositoryName(String),
+ /// <p>The specified target branch is not valid.</p>
+ InvalidTargetBranch(String),
+ /// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
+ ManualMergeRequired(String),
+ /// <p>The number of allowed conflict resolution entries was exceeded.</p>
+ MaximumConflictResolutionEntriesExceeded(String),
+ /// <p>The number of files to load exceeds the allowed limit.</p>
+ MaximumFileContentToLoadExceeded(String),
+ /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+ MaximumItemsToCompareExceeded(String),
+ /// <p>More than one conflict resolution entry exists for the conflict. A conflict can have only one conflict resolution entry.</p>
+ MultipleConflictResolutionEntries(String),
+ /// <p>The user name is not valid because it has exceeded the character limit for author names.</p>
+ NameLengthExceeded(String),
+ /// <p>The folderPath for a location cannot be null.</p>
+ PathRequired(String),
+ /// <p>USE_NEW_CONTENT was specified but no replacement content has been provided.</p>
+ ReplacementContentRequired(String),
+ /// <p>A replacement type is required.</p>
+ ReplacementTypeRequired(String),
+ /// <p>The specified repository does not exist.</p>
+ RepositoryDoesNotExist(String),
+ /// <p>A repository name is required but was not specified.</p>
+ RepositoryNameRequired(String),
+ /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), +} + +impl MergeBranchesByThreeWayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "BranchDoesNotExistException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::BranchDoesNotExist( + err.msg, + )) + } + "BranchNameIsTagNameException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::BranchNameIsTagName( + err.msg, + )) + } + "BranchNameRequiredException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::BranchNameRequired( + err.msg, + )) + } + "CommitDoesNotExistException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::CommitDoesNotExist( + err.msg, + )) + } + "CommitMessageLengthExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::CommitMessageLengthExceeded(err.msg), + ) + } + "CommitRequiredException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::CommitRequired( + err.msg, + )) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::ConcurrentReferenceUpdate(err.msg), + ) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::EncryptionKeyUnavailable(err.msg), + ) + } + "FileContentSizeLimitExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::FileContentSizeLimitExceeded(err.msg), + ) + } + "FileModeRequiredException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::FileModeRequired( + err.msg, + )) + } + "FolderContentSizeLimitExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::FolderContentSizeLimitExceeded(err.msg), + ) + } + "InvalidBranchNameException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidBranchName( + err.msg, + )) + } + "InvalidCommitException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidCommit( + err.msg, + )) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidConflictResolution(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidEmailException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidEmail( + err.msg, + )) + } + "InvalidFileModeException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidFileMode( + err.msg, + )) + } + "InvalidPathException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidPath(err.msg)) + } + "InvalidReplacementContentException" 
=> { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidReplacementContent(err.msg), + ) + } + "InvalidReplacementTypeException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidReplacementType(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::InvalidRepositoryName(err.msg), + ) + } + "InvalidTargetBranchException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::InvalidTargetBranch( + err.msg, + )) + } + "ManualMergeRequiredException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::ManualMergeRequired( + err.msg, + )) + } + "MaximumConflictResolutionEntriesExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::MaximumConflictResolutionEntriesExceeded( + err.msg, + ), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MultipleConflictResolutionEntriesException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::MultipleConflictResolutionEntries(err.msg), + ) + } + "NameLengthExceededException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::NameLengthExceeded( + err.msg, + )) + } + "PathRequiredException" => { + return RusotoError::Service(MergeBranchesByThreeWayError::PathRequired( + err.msg, + )) + } + "ReplacementContentRequiredException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::ReplacementContentRequired(err.msg), + ) + } + "ReplacementTypeRequiredException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::ReplacementTypeRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::RepositoryNameRequired(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + MergeBranchesByThreeWayError::TipsDivergenceExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for MergeBranchesByThreeWayError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for MergeBranchesByThreeWayError { + fn description(&self) -> &str { + match *self { + MergeBranchesByThreeWayError::BranchDoesNotExist(ref cause) => cause, + MergeBranchesByThreeWayError::BranchNameIsTagName(ref cause) => cause, + MergeBranchesByThreeWayError::BranchNameRequired(ref cause) => cause, + MergeBranchesByThreeWayError::CommitDoesNotExist(ref cause) => cause, + MergeBranchesByThreeWayError::CommitMessageLengthExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::CommitRequired(ref cause) => cause, + MergeBranchesByThreeWayError::ConcurrentReferenceUpdate(ref cause) => cause, + MergeBranchesByThreeWayError::EncryptionIntegrityChecksFailed(ref cause) => cause, + MergeBranchesByThreeWayError::EncryptionKeyAccessDenied(ref cause) => cause, + MergeBranchesByThreeWayError::EncryptionKeyDisabled(ref cause) => cause, + MergeBranchesByThreeWayError::EncryptionKeyNotFound(ref 
cause) => cause, + MergeBranchesByThreeWayError::EncryptionKeyUnavailable(ref cause) => cause, + MergeBranchesByThreeWayError::FileContentSizeLimitExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::FileModeRequired(ref cause) => cause, + MergeBranchesByThreeWayError::FolderContentSizeLimitExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidBranchName(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidCommit(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidConflictDetailLevel(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidConflictResolution(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidConflictResolutionStrategy(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidEmail(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidFileMode(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidPath(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidReplacementContent(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidReplacementType(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidRepositoryName(ref cause) => cause, + MergeBranchesByThreeWayError::InvalidTargetBranch(ref cause) => cause, + MergeBranchesByThreeWayError::ManualMergeRequired(ref cause) => cause, + MergeBranchesByThreeWayError::MaximumConflictResolutionEntriesExceeded(ref cause) => { + cause + } + MergeBranchesByThreeWayError::MaximumFileContentToLoadExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::MaximumItemsToCompareExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::MultipleConflictResolutionEntries(ref cause) => cause, + MergeBranchesByThreeWayError::NameLengthExceeded(ref cause) => cause, + MergeBranchesByThreeWayError::PathRequired(ref cause) => cause, + MergeBranchesByThreeWayError::ReplacementContentRequired(ref cause) => cause, + MergeBranchesByThreeWayError::ReplacementTypeRequired(ref cause) => cause, + MergeBranchesByThreeWayError::RepositoryDoesNotExist(ref cause) => cause, + MergeBranchesByThreeWayError::RepositoryNameRequired(ref cause) => cause, + MergeBranchesByThreeWayError::TipsDivergenceExceeded(ref cause) => cause, + } + } +} +/// Errors returned by MergePullRequestByFastForward +#[derive(Debug, PartialEq)] +pub enum MergePullRequestByFastForwardError { + ///
<p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+ ConcurrentReferenceUpdate(String),
+ /// <p>An encryption integrity check failed.</p>
+ EncryptionIntegrityChecksFailed(String),
+ /// <p>An encryption key could not be accessed.</p>
+ EncryptionKeyAccessDenied(String),
+ /// <p>The encryption key is disabled.</p>
+ EncryptionKeyDisabled(String),
+ /// <p>No encryption key was found.</p>
+ EncryptionKeyNotFound(String),
+ /// <p>The encryption key is not available.</p>
+ EncryptionKeyUnavailable(String),
+ /// <p>The specified commit ID is not valid.</p>
+ InvalidCommitId(String),
+ /// <p>The pull request ID is not valid. Make sure that you have provided the full ID and that the pull request is in the specified repository, and then try again.</p>
+ InvalidPullRequestId(String),
+ /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+ InvalidRepositoryName(String),
+ /// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
+ ManualMergeRequired(String),
+ /// <p>The pull request status cannot be updated because it is already closed.</p>
+ PullRequestAlreadyClosed(String),
+ /// <p>The pull request ID could not be found. Make sure that you have specified the correct repository name and pull request ID, and then try again.</p>
+ PullRequestDoesNotExist(String),
+ /// <p>A pull request ID is required, but none was provided.</p>
+ PullRequestIdRequired(String),
+ /// <p>The specified reference does not exist. You must provide a full commit ID.</p>
+ ReferenceDoesNotExist(String),
+ /// <p>The specified repository does not exist.</p>
+ RepositoryDoesNotExist(String),
+ /// <p>A repository name is required but was not specified.</p>
+ RepositoryNameRequired(String),
+ /// <p>The repository does not contain any pull requests with that pull request ID. Use GetPullRequest to verify the correct repository name for the pull request ID.</p>
+ RepositoryNotAssociatedWithPullRequest(String),
+ /// <p>The tip of the source branch in the destination repository does not match the tip of the source branch specified in your request. The pull request might have been updated. Make sure that you have the latest changes.</p>
+ TipOfSourceReferenceIsDifferent(String), +} + +impl MergePullRequestByFastForwardError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::ConcurrentReferenceUpdate(err.msg), + ) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::EncryptionIntegrityChecksFailed( + err.msg, + ), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::EncryptionKeyUnavailable(err.msg), + ) + } + "InvalidCommitIdException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::InvalidCommitId(err.msg), + ) + } + "InvalidPullRequestIdException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::InvalidPullRequestId(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::InvalidRepositoryName(err.msg), + ) + } + "ManualMergeRequiredException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::ManualMergeRequired(err.msg), + ) + } + "PullRequestAlreadyClosedException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::PullRequestAlreadyClosed(err.msg), + ) + } + "PullRequestDoesNotExistException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::PullRequestDoesNotExist(err.msg), + ) + } + "PullRequestIdRequiredException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::PullRequestIdRequired(err.msg), + ) + } + "ReferenceDoesNotExistException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::ReferenceDoesNotExist(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::RepositoryNameRequired(err.msg), + ) + } + "RepositoryNotAssociatedWithPullRequestException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::RepositoryNotAssociatedWithPullRequest( + err.msg, + ), + ) + } + "TipOfSourceReferenceIsDifferentException" => { + return RusotoError::Service( + MergePullRequestByFastForwardError::TipOfSourceReferenceIsDifferent( + err.msg, + ), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for MergePullRequestByFastForwardError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for MergePullRequestByFastForwardError { + fn description(&self) -> &str { + match *self { + MergePullRequestByFastForwardError::ConcurrentReferenceUpdate(ref cause) => cause, + 
MergePullRequestByFastForwardError::EncryptionIntegrityChecksFailed(ref cause) => cause, + MergePullRequestByFastForwardError::EncryptionKeyAccessDenied(ref cause) => cause, + MergePullRequestByFastForwardError::EncryptionKeyDisabled(ref cause) => cause, + MergePullRequestByFastForwardError::EncryptionKeyNotFound(ref cause) => cause, + MergePullRequestByFastForwardError::EncryptionKeyUnavailable(ref cause) => cause, + MergePullRequestByFastForwardError::InvalidCommitId(ref cause) => cause, + MergePullRequestByFastForwardError::InvalidPullRequestId(ref cause) => cause, + MergePullRequestByFastForwardError::InvalidRepositoryName(ref cause) => cause, + MergePullRequestByFastForwardError::ManualMergeRequired(ref cause) => cause, + MergePullRequestByFastForwardError::PullRequestAlreadyClosed(ref cause) => cause, + MergePullRequestByFastForwardError::PullRequestDoesNotExist(ref cause) => cause, + MergePullRequestByFastForwardError::PullRequestIdRequired(ref cause) => cause, + MergePullRequestByFastForwardError::ReferenceDoesNotExist(ref cause) => cause, + MergePullRequestByFastForwardError::RepositoryDoesNotExist(ref cause) => cause, + MergePullRequestByFastForwardError::RepositoryNameRequired(ref cause) => cause, + MergePullRequestByFastForwardError::RepositoryNotAssociatedWithPullRequest( + ref cause, + ) => cause, + MergePullRequestByFastForwardError::TipOfSourceReferenceIsDifferent(ref cause) => cause, + } + } +} +/// Errors returned by MergePullRequestBySquash +#[derive(Debug, PartialEq)] +pub enum MergePullRequestBySquashError { + ///
<p>The commit message is too long. Provide a shorter string.</p>
+ CommitMessageLengthExceeded(String),
+ /// <p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+ ConcurrentReferenceUpdate(String),
+ /// <p>An encryption integrity check failed.</p>
+ EncryptionIntegrityChecksFailed(String),
+ /// <p>An encryption key could not be accessed.</p>
+ EncryptionKeyAccessDenied(String),
+ /// <p>The encryption key is disabled.</p>
+ EncryptionKeyDisabled(String),
+ /// <p>No encryption key was found.</p>
+ EncryptionKeyNotFound(String),
+ /// <p>The encryption key is not available.</p>
+ EncryptionKeyUnavailable(String),
+ /// <p>The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.</p>
+ FileContentSizeLimitExceeded(String),
+ /// <p>The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.</p>
+ FolderContentSizeLimitExceeded(String),
+ /// <p>The specified commit ID is not valid.</p>
+ InvalidCommitId(String),
+ /// <p>The specified conflict detail level is not valid.</p>
+ InvalidConflictDetailLevel(String),
+ /// <p>The specified conflict resolution list is not valid.</p>
+ InvalidConflictResolution(String),
+ /// <p>The specified conflict resolution strategy is not valid.</p>
+ InvalidConflictResolutionStrategy(String),
+ /// <p>The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.</p>
+ InvalidEmail(String),
+ /// <p>The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.</p>
+ InvalidFileMode(String),
+ /// <p>The specified path is not valid.</p>
+ InvalidPath(String),
+ /// <p>The pull request ID is not valid. Make sure that you have provided the full ID and that the pull request is in the specified repository, and then try again.</p>
+ InvalidPullRequestId(String),
+ /// <p>Automerge was specified for resolving the conflict, but the replacement type is not valid or content is missing.</p>
+ InvalidReplacementContent(String),
+ /// <p>Automerge was specified for resolving the conflict, but the specified replacement type is not valid.</p>
+ InvalidReplacementType(String),
+ /// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
+ InvalidRepositoryName(String),
+ /// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
+ ManualMergeRequired(String),
+ /// <p>The number of allowed conflict resolution entries was exceeded.</p>
+ MaximumConflictResolutionEntriesExceeded(String),
+ /// <p>The number of files to load exceeds the allowed limit.</p>
+ MaximumFileContentToLoadExceeded(String),
+ /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+ MaximumItemsToCompareExceeded(String),
+ /// <p>More than one conflict resolution entry exists for the conflict. A conflict can have only one conflict resolution entry.</p>
+ MultipleConflictResolutionEntries(String),
+ /// <p>The user name is not valid because it has exceeded the character limit for author names.</p>
+ NameLengthExceeded(String),
+ /// <p>The folderPath for a location cannot be null.</p>
+ PathRequired(String),
+ /// <p>The pull request status cannot be updated because it is already closed.</p>
+ PullRequestAlreadyClosed(String),
+ /// <p>The pull request ID could not be found. Make sure that you have specified the correct repository name and pull request ID, and then try again.</p>
+ PullRequestDoesNotExist(String),
+ /// <p>A pull request ID is required, but none was provided.</p>
+ PullRequestIdRequired(String),
+ /// <p>USE_NEW_CONTENT was specified but no replacement content has been provided.</p>
+ ReplacementContentRequired(String),
+ /// <p>A replacement type is required.</p>
+ ReplacementTypeRequired(String),
+ /// <p>The specified repository does not exist.</p>
+ RepositoryDoesNotExist(String),
+ /// <p>A repository name is required but was not specified.</p>
+ RepositoryNameRequired(String),
+ /// <p>The repository does not contain any pull requests with that pull request ID. Use GetPullRequest to verify the correct repository name for the pull request ID.</p>
+ RepositoryNotAssociatedWithPullRequest(String),
+ /// <p>The tip of the source branch in the destination repository does not match the tip of the source branch specified in your request. The pull request might have been updated. Make sure that you have the latest changes.</p>
+ TipOfSourceReferenceIsDifferent(String),
+ /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), +} + +impl MergePullRequestBySquashError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "CommitMessageLengthExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::CommitMessageLengthExceeded(err.msg), + ) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergePullRequestBySquashError::ConcurrentReferenceUpdate(err.msg), + ) + } + "EncryptionIntegrityChecksFailedException" => { + return RusotoError::Service( + MergePullRequestBySquashError::EncryptionIntegrityChecksFailed(err.msg), + ) + } + "EncryptionKeyAccessDeniedException" => { + return RusotoError::Service( + MergePullRequestBySquashError::EncryptionKeyAccessDenied(err.msg), + ) + } + "EncryptionKeyDisabledException" => { + return RusotoError::Service( + MergePullRequestBySquashError::EncryptionKeyDisabled(err.msg), + ) + } + "EncryptionKeyNotFoundException" => { + return RusotoError::Service( + MergePullRequestBySquashError::EncryptionKeyNotFound(err.msg), + ) + } + "EncryptionKeyUnavailableException" => { + return RusotoError::Service( + MergePullRequestBySquashError::EncryptionKeyUnavailable(err.msg), + ) + } + "FileContentSizeLimitExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::FileContentSizeLimitExceeded(err.msg), + ) + } + "FolderContentSizeLimitExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::FolderContentSizeLimitExceeded(err.msg), + ) + } + "InvalidCommitIdException" => { + return RusotoError::Service(MergePullRequestBySquashError::InvalidCommitId( + err.msg, + )) + } + "InvalidConflictDetailLevelException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidConflictResolution(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidConflictResolutionStrategy(err.msg), + ) + } + "InvalidEmailException" => { + return RusotoError::Service(MergePullRequestBySquashError::InvalidEmail( + err.msg, + )) + } + "InvalidFileModeException" => { + return RusotoError::Service(MergePullRequestBySquashError::InvalidFileMode( err.msg, )) } - "RepositoryDoesNotExistException" => { - return RusotoError::Service(ListTagsForResourceError::RepositoryDoesNotExist( + "InvalidPathException" => { + return RusotoError::Service(MergePullRequestBySquashError::InvalidPath( err.msg, )) } - "ResourceArnRequiredException" => { - return RusotoError::Service(ListTagsForResourceError::ResourceArnRequired( + "InvalidPullRequestIdException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidPullRequestId(err.msg), + ) + } + "InvalidReplacementContentException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidReplacementContent(err.msg), + ) + } + "InvalidReplacementTypeException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidReplacementType(err.msg), + ) + } + "InvalidRepositoryNameException" => { + return RusotoError::Service( + MergePullRequestBySquashError::InvalidRepositoryName(err.msg), + ) + } + "ManualMergeRequiredException" => { + return RusotoError::Service( + MergePullRequestBySquashError::ManualMergeRequired(err.msg), + ) + } + 
"MaximumConflictResolutionEntriesExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::MaximumConflictResolutionEntriesExceeded( + err.msg, + ), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MultipleConflictResolutionEntriesException" => { + return RusotoError::Service( + MergePullRequestBySquashError::MultipleConflictResolutionEntries(err.msg), + ) + } + "NameLengthExceededException" => { + return RusotoError::Service(MergePullRequestBySquashError::NameLengthExceeded( + err.msg, + )) + } + "PathRequiredException" => { + return RusotoError::Service(MergePullRequestBySquashError::PathRequired( err.msg, )) } + "PullRequestAlreadyClosedException" => { + return RusotoError::Service( + MergePullRequestBySquashError::PullRequestAlreadyClosed(err.msg), + ) + } + "PullRequestDoesNotExistException" => { + return RusotoError::Service( + MergePullRequestBySquashError::PullRequestDoesNotExist(err.msg), + ) + } + "PullRequestIdRequiredException" => { + return RusotoError::Service( + MergePullRequestBySquashError::PullRequestIdRequired(err.msg), + ) + } + "ReplacementContentRequiredException" => { + return RusotoError::Service( + MergePullRequestBySquashError::ReplacementContentRequired(err.msg), + ) + } + "ReplacementTypeRequiredException" => { + return RusotoError::Service( + MergePullRequestBySquashError::ReplacementTypeRequired(err.msg), + ) + } + "RepositoryDoesNotExistException" => { + return RusotoError::Service( + MergePullRequestBySquashError::RepositoryDoesNotExist(err.msg), + ) + } + "RepositoryNameRequiredException" => { + return RusotoError::Service( + MergePullRequestBySquashError::RepositoryNameRequired(err.msg), + ) + } + "RepositoryNotAssociatedWithPullRequestException" => { + return RusotoError::Service( + MergePullRequestBySquashError::RepositoryNotAssociatedWithPullRequest( + err.msg, + ), + ) + } + "TipOfSourceReferenceIsDifferentException" => { + return RusotoError::Service( + MergePullRequestBySquashError::TipOfSourceReferenceIsDifferent(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + MergePullRequestBySquashError::TipsDivergenceExceeded(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -5000,24 +8326,65 @@ impl ListTagsForResourceError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListTagsForResourceError { +impl fmt::Display for MergePullRequestBySquashError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListTagsForResourceError { +impl Error for MergePullRequestBySquashError { fn description(&self) -> &str { match *self { - ListTagsForResourceError::InvalidRepositoryName(ref cause) => cause, - ListTagsForResourceError::InvalidResourceArn(ref cause) => cause, - ListTagsForResourceError::RepositoryDoesNotExist(ref cause) => cause, - ListTagsForResourceError::ResourceArnRequired(ref cause) => cause, + MergePullRequestBySquashError::CommitMessageLengthExceeded(ref cause) => cause, + MergePullRequestBySquashError::ConcurrentReferenceUpdate(ref cause) => cause, + MergePullRequestBySquashError::EncryptionIntegrityChecksFailed(ref cause) => cause, + 
MergePullRequestBySquashError::EncryptionKeyAccessDenied(ref cause) => cause, + MergePullRequestBySquashError::EncryptionKeyDisabled(ref cause) => cause, + MergePullRequestBySquashError::EncryptionKeyNotFound(ref cause) => cause, + MergePullRequestBySquashError::EncryptionKeyUnavailable(ref cause) => cause, + MergePullRequestBySquashError::FileContentSizeLimitExceeded(ref cause) => cause, + MergePullRequestBySquashError::FolderContentSizeLimitExceeded(ref cause) => cause, + MergePullRequestBySquashError::InvalidCommitId(ref cause) => cause, + MergePullRequestBySquashError::InvalidConflictDetailLevel(ref cause) => cause, + MergePullRequestBySquashError::InvalidConflictResolution(ref cause) => cause, + MergePullRequestBySquashError::InvalidConflictResolutionStrategy(ref cause) => cause, + MergePullRequestBySquashError::InvalidEmail(ref cause) => cause, + MergePullRequestBySquashError::InvalidFileMode(ref cause) => cause, + MergePullRequestBySquashError::InvalidPath(ref cause) => cause, + MergePullRequestBySquashError::InvalidPullRequestId(ref cause) => cause, + MergePullRequestBySquashError::InvalidReplacementContent(ref cause) => cause, + MergePullRequestBySquashError::InvalidReplacementType(ref cause) => cause, + MergePullRequestBySquashError::InvalidRepositoryName(ref cause) => cause, + MergePullRequestBySquashError::ManualMergeRequired(ref cause) => cause, + MergePullRequestBySquashError::MaximumConflictResolutionEntriesExceeded(ref cause) => { + cause + } + MergePullRequestBySquashError::MaximumFileContentToLoadExceeded(ref cause) => cause, + MergePullRequestBySquashError::MaximumItemsToCompareExceeded(ref cause) => cause, + MergePullRequestBySquashError::MultipleConflictResolutionEntries(ref cause) => cause, + MergePullRequestBySquashError::NameLengthExceeded(ref cause) => cause, + MergePullRequestBySquashError::PathRequired(ref cause) => cause, + MergePullRequestBySquashError::PullRequestAlreadyClosed(ref cause) => cause, + MergePullRequestBySquashError::PullRequestDoesNotExist(ref cause) => cause, + MergePullRequestBySquashError::PullRequestIdRequired(ref cause) => cause, + MergePullRequestBySquashError::ReplacementContentRequired(ref cause) => cause, + MergePullRequestBySquashError::ReplacementTypeRequired(ref cause) => cause, + MergePullRequestBySquashError::RepositoryDoesNotExist(ref cause) => cause, + MergePullRequestBySquashError::RepositoryNameRequired(ref cause) => cause, + MergePullRequestBySquashError::RepositoryNotAssociatedWithPullRequest(ref cause) => { + cause + } + MergePullRequestBySquashError::TipOfSourceReferenceIsDifferent(ref cause) => cause, + MergePullRequestBySquashError::TipsDivergenceExceeded(ref cause) => cause, } } } -/// Errors returned by MergePullRequestByFastForward +/// Errors returned by MergePullRequestByThreeWay #[derive(Debug, PartialEq)] -pub enum MergePullRequestByFastForwardError { +pub enum MergePullRequestByThreeWayError { + ///
<p>The commit message is too long. Provide a shorter string.</p>
+ CommitMessageLengthExceeded(String),
+ /// <p>The merge cannot be completed because the target branch has been modified. Another user might have modified the target branch while the merge was in progress. Wait a few minutes, and then try again.</p>
+ ConcurrentReferenceUpdate(String),
/// <p>An encryption integrity check failed.</p>
EncryptionIntegrityChecksFailed(String),
/// <p>An encryption key could not be accessed.</p>
@@ -5028,120 +8395,263 @@ pub enum MergePullRequestByFastForwardError {
EncryptionKeyNotFound(String),
/// <p>The encryption key is not available.</p>
EncryptionKeyUnavailable(String),
+ /// <p>The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.</p>
+ FileContentSizeLimitExceeded(String),
+ /// <p>The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.</p>
+ FolderContentSizeLimitExceeded(String),
/// <p>The specified commit ID is not valid.</p>
InvalidCommitId(String),
+ /// <p>The specified conflict detail level is not valid.</p>
+ InvalidConflictDetailLevel(String),
+ /// <p>The specified conflict resolution list is not valid.</p>
+ InvalidConflictResolution(String),
+ /// <p>The specified conflict resolution strategy is not valid.</p>
+ InvalidConflictResolutionStrategy(String),
+ /// <p>The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.</p>
+ InvalidEmail(String),
+ /// <p>The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.</p>
+ InvalidFileMode(String),
+ /// <p>The specified path is not valid.</p>
+ InvalidPath(String),
/// <p>The pull request ID is not valid. Make sure that you have provided the full ID and that the pull request is in the specified repository, and then try again.</p>
InvalidPullRequestId(String),
+ /// <p>Automerge was specified for resolving the conflict, but the replacement type is not valid or content is missing.</p>
+ InvalidReplacementContent(String),
+ /// <p>Automerge was specified for resolving the conflict, but the specified replacement type is not valid.</p>
+ InvalidReplacementType(String),
/// <p>At least one specified repository name is not valid.</p> <p>This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.</p>
InvalidRepositoryName(String),
/// <p>The pull request cannot be merged automatically into the destination branch. You must manually merge the branches and resolve any conflicts.</p>
ManualMergeRequired(String),
+ /// <p>The number of allowed conflict resolution entries was exceeded.</p>
+ MaximumConflictResolutionEntriesExceeded(String),
+ /// <p>The number of files to load exceeds the allowed limit.</p>
+ MaximumFileContentToLoadExceeded(String),
+ /// <p>The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.</p>
+ MaximumItemsToCompareExceeded(String),
+ /// <p>More than one conflict resolution entry exists for the conflict. A conflict can have only one conflict resolution entry.</p>
+ MultipleConflictResolutionEntries(String),
+ /// <p>The user name is not valid because it has exceeded the character limit for author names.</p>
+ NameLengthExceeded(String),
+ /// <p>The folderPath for a location cannot be null.</p>
+ PathRequired(String),
/// <p>The pull request status cannot be updated because it is already closed.</p>
PullRequestAlreadyClosed(String),
/// <p>The pull request ID could not be found. Make sure that you have specified the correct repository name and pull request ID, and then try again.</p>
PullRequestDoesNotExist(String),
/// <p>A pull request ID is required, but none was provided.</p>
PullRequestIdRequired(String),
- /// <p>The specified reference does not exist. You must provide a full commit ID.</p>
- ReferenceDoesNotExist(String),
+ /// <p>USE_NEW_CONTENT was specified but no replacement content has been provided.</p>
+ ReplacementContentRequired(String),
+ /// <p>A replacement type is required.</p>
+ ReplacementTypeRequired(String),
/// <p>The specified repository does not exist.</p>
RepositoryDoesNotExist(String),
/// <p>A repository name is required but was not specified.</p>
RepositoryNameRequired(String),
+ /// <p>The repository does not contain any pull requests with that pull request ID. Use GetPullRequest to verify the correct repository name for the pull request ID.</p>
+ RepositoryNotAssociatedWithPullRequest(String),
/// <p>The tip of the source branch in the destination repository does not match the tip of the source branch specified in your request. The pull request might have been updated. Make sure that you have the latest changes.</p>
TipOfSourceReferenceIsDifferent(String),
+ /// <p>The divergence between the tips of the provided commit specifiers is too great to determine whether there might be any merge conflicts. Locally compare the specifiers using git diff or a diff tool.</p>
+ TipsDivergenceExceeded(String), } -impl MergePullRequestByFastForwardError { +impl MergePullRequestByThreeWayError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "CommitMessageLengthExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::CommitMessageLengthExceeded(err.msg), + ) + } + "ConcurrentReferenceUpdateException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::ConcurrentReferenceUpdate(err.msg), + ) + } "EncryptionIntegrityChecksFailedException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::EncryptionIntegrityChecksFailed( - err.msg, - ), + MergePullRequestByThreeWayError::EncryptionIntegrityChecksFailed(err.msg), ) } "EncryptionKeyAccessDeniedException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::EncryptionKeyAccessDenied(err.msg), + MergePullRequestByThreeWayError::EncryptionKeyAccessDenied(err.msg), ) } "EncryptionKeyDisabledException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::EncryptionKeyDisabled(err.msg), + MergePullRequestByThreeWayError::EncryptionKeyDisabled(err.msg), ) } "EncryptionKeyNotFoundException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::EncryptionKeyNotFound(err.msg), + MergePullRequestByThreeWayError::EncryptionKeyNotFound(err.msg), ) } "EncryptionKeyUnavailableException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::EncryptionKeyUnavailable(err.msg), + MergePullRequestByThreeWayError::EncryptionKeyUnavailable(err.msg), + ) + } + "FileContentSizeLimitExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::FileContentSizeLimitExceeded(err.msg), + ) + } + "FolderContentSizeLimitExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::FolderContentSizeLimitExceeded(err.msg), ) } "InvalidCommitIdException" => { + return RusotoError::Service(MergePullRequestByThreeWayError::InvalidCommitId( + err.msg, + )) + } + "InvalidConflictDetailLevelException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::InvalidCommitId(err.msg), + MergePullRequestByThreeWayError::InvalidConflictDetailLevel(err.msg), + ) + } + "InvalidConflictResolutionException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::InvalidConflictResolution(err.msg), + ) + } + "InvalidConflictResolutionStrategyException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::InvalidConflictResolutionStrategy(err.msg), ) } + "InvalidEmailException" => { + return RusotoError::Service(MergePullRequestByThreeWayError::InvalidEmail( + err.msg, + )) + } + "InvalidFileModeException" => { + return RusotoError::Service(MergePullRequestByThreeWayError::InvalidFileMode( + err.msg, + )) + } + "InvalidPathException" => { + return RusotoError::Service(MergePullRequestByThreeWayError::InvalidPath( + err.msg, + )) + } "InvalidPullRequestIdException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::InvalidPullRequestId(err.msg), + MergePullRequestByThreeWayError::InvalidPullRequestId(err.msg), + ) + } + "InvalidReplacementContentException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::InvalidReplacementContent(err.msg), + ) + } + "InvalidReplacementTypeException" => { + return RusotoError::Service( + 
MergePullRequestByThreeWayError::InvalidReplacementType(err.msg), ) } "InvalidRepositoryNameException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::InvalidRepositoryName(err.msg), + MergePullRequestByThreeWayError::InvalidRepositoryName(err.msg), ) } "ManualMergeRequiredException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::ManualMergeRequired(err.msg), + MergePullRequestByThreeWayError::ManualMergeRequired(err.msg), + ) + } + "MaximumConflictResolutionEntriesExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::MaximumConflictResolutionEntriesExceeded( + err.msg, + ), + ) + } + "MaximumFileContentToLoadExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::MaximumFileContentToLoadExceeded(err.msg), + ) + } + "MaximumItemsToCompareExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::MaximumItemsToCompareExceeded(err.msg), + ) + } + "MultipleConflictResolutionEntriesException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::MultipleConflictResolutionEntries(err.msg), + ) + } + "NameLengthExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::NameLengthExceeded(err.msg), ) } + "PathRequiredException" => { + return RusotoError::Service(MergePullRequestByThreeWayError::PathRequired( + err.msg, + )) + } "PullRequestAlreadyClosedException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::PullRequestAlreadyClosed(err.msg), + MergePullRequestByThreeWayError::PullRequestAlreadyClosed(err.msg), ) } "PullRequestDoesNotExistException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::PullRequestDoesNotExist(err.msg), + MergePullRequestByThreeWayError::PullRequestDoesNotExist(err.msg), ) } "PullRequestIdRequiredException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::PullRequestIdRequired(err.msg), + MergePullRequestByThreeWayError::PullRequestIdRequired(err.msg), ) } - "ReferenceDoesNotExistException" => { + "ReplacementContentRequiredException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::ReferenceDoesNotExist(err.msg), + MergePullRequestByThreeWayError::ReplacementContentRequired(err.msg), + ) + } + "ReplacementTypeRequiredException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::ReplacementTypeRequired(err.msg), ) } "RepositoryDoesNotExistException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::RepositoryDoesNotExist(err.msg), + MergePullRequestByThreeWayError::RepositoryDoesNotExist(err.msg), ) } "RepositoryNameRequiredException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::RepositoryNameRequired(err.msg), + MergePullRequestByThreeWayError::RepositoryNameRequired(err.msg), ) } - "TipOfSourceReferenceIsDifferentException" => { + "RepositoryNotAssociatedWithPullRequestException" => { return RusotoError::Service( - MergePullRequestByFastForwardError::TipOfSourceReferenceIsDifferent( + MergePullRequestByThreeWayError::RepositoryNotAssociatedWithPullRequest( err.msg, ), ) } + "TipOfSourceReferenceIsDifferentException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::TipOfSourceReferenceIsDifferent(err.msg), + ) + } + "TipsDivergenceExceededException" => { + return RusotoError::Service( + MergePullRequestByThreeWayError::TipsDivergenceExceeded(err.msg), + ) + } "ValidationException" => return 
RusotoError::Validation(err.msg), _ => {} } @@ -5149,30 +8659,55 @@ impl MergePullRequestByFastForwardError { return RusotoError::Unknown(res); } } -impl fmt::Display for MergePullRequestByFastForwardError { +impl fmt::Display for MergePullRequestByThreeWayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for MergePullRequestByFastForwardError { +impl Error for MergePullRequestByThreeWayError { fn description(&self) -> &str { match *self { - MergePullRequestByFastForwardError::EncryptionIntegrityChecksFailed(ref cause) => cause, - MergePullRequestByFastForwardError::EncryptionKeyAccessDenied(ref cause) => cause, - MergePullRequestByFastForwardError::EncryptionKeyDisabled(ref cause) => cause, - MergePullRequestByFastForwardError::EncryptionKeyNotFound(ref cause) => cause, - MergePullRequestByFastForwardError::EncryptionKeyUnavailable(ref cause) => cause, - MergePullRequestByFastForwardError::InvalidCommitId(ref cause) => cause, - MergePullRequestByFastForwardError::InvalidPullRequestId(ref cause) => cause, - MergePullRequestByFastForwardError::InvalidRepositoryName(ref cause) => cause, - MergePullRequestByFastForwardError::ManualMergeRequired(ref cause) => cause, - MergePullRequestByFastForwardError::PullRequestAlreadyClosed(ref cause) => cause, - MergePullRequestByFastForwardError::PullRequestDoesNotExist(ref cause) => cause, - MergePullRequestByFastForwardError::PullRequestIdRequired(ref cause) => cause, - MergePullRequestByFastForwardError::ReferenceDoesNotExist(ref cause) => cause, - MergePullRequestByFastForwardError::RepositoryDoesNotExist(ref cause) => cause, - MergePullRequestByFastForwardError::RepositoryNameRequired(ref cause) => cause, - MergePullRequestByFastForwardError::TipOfSourceReferenceIsDifferent(ref cause) => cause, + MergePullRequestByThreeWayError::CommitMessageLengthExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::ConcurrentReferenceUpdate(ref cause) => cause, + MergePullRequestByThreeWayError::EncryptionIntegrityChecksFailed(ref cause) => cause, + MergePullRequestByThreeWayError::EncryptionKeyAccessDenied(ref cause) => cause, + MergePullRequestByThreeWayError::EncryptionKeyDisabled(ref cause) => cause, + MergePullRequestByThreeWayError::EncryptionKeyNotFound(ref cause) => cause, + MergePullRequestByThreeWayError::EncryptionKeyUnavailable(ref cause) => cause, + MergePullRequestByThreeWayError::FileContentSizeLimitExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::FolderContentSizeLimitExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidCommitId(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidConflictDetailLevel(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidConflictResolution(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidConflictResolutionStrategy(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidEmail(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidFileMode(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidPath(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidPullRequestId(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidReplacementContent(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidReplacementType(ref cause) => cause, + MergePullRequestByThreeWayError::InvalidRepositoryName(ref cause) => cause, + MergePullRequestByThreeWayError::ManualMergeRequired(ref cause) => cause, + 
MergePullRequestByThreeWayError::MaximumConflictResolutionEntriesExceeded( + ref cause, + ) => cause, + MergePullRequestByThreeWayError::MaximumFileContentToLoadExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::MaximumItemsToCompareExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::MultipleConflictResolutionEntries(ref cause) => cause, + MergePullRequestByThreeWayError::NameLengthExceeded(ref cause) => cause, + MergePullRequestByThreeWayError::PathRequired(ref cause) => cause, + MergePullRequestByThreeWayError::PullRequestAlreadyClosed(ref cause) => cause, + MergePullRequestByThreeWayError::PullRequestDoesNotExist(ref cause) => cause, + MergePullRequestByThreeWayError::PullRequestIdRequired(ref cause) => cause, + MergePullRequestByThreeWayError::ReplacementContentRequired(ref cause) => cause, + MergePullRequestByThreeWayError::ReplacementTypeRequired(ref cause) => cause, + MergePullRequestByThreeWayError::RepositoryDoesNotExist(ref cause) => cause, + MergePullRequestByThreeWayError::RepositoryNameRequired(ref cause) => cause, + MergePullRequestByThreeWayError::RepositoryNotAssociatedWithPullRequest(ref cause) => { + cause + } + MergePullRequestByThreeWayError::TipOfSourceReferenceIsDifferent(ref cause) => cause, + MergePullRequestByThreeWayError::TipsDivergenceExceeded(ref cause) => cause, } } } @@ -5756,13 +9291,13 @@ pub enum PutFileError { EncryptionKeyUnavailable(String), ///

The file cannot be added because it is empty. Empty files cannot be added to the repository with this API.

FileContentRequired(String), - ///

The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

+ ///

The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

FileContentSizeLimitExceeded(String), ///

A file cannot be added to the repository because the specified file name has the same name as a directory in this repository. Either provide another name for the file, or add the file in a directory that does not match the file name.

FileNameConflictsWithDirectoryName(String), ///

The commit cannot be created because a specified file path points to a submodule. Verify that the destination files have valid file paths that do not point to a submodule.

FilePathConflictsWithSubmodulePath(String), - ///

The commit cannot be created because at least one of the overall changes in the commit result in a folder contents exceeding the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

+ ///

The commit cannot be created because at least one of the overall changes in the commit results in a folder whose contents exceed the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

FolderContentSizeLimitExceeded(String), ///

The specified reference name is not valid.

InvalidBranchName(String), @@ -5778,7 +9313,7 @@ pub enum PutFileError { InvalidPath(String), ///

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

InvalidRepositoryName(String), - ///

The user name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.

+ ///

The user name is not valid because it has exceeded the character limit for author names.

NameLengthExceeded(String), ///

The parent commit ID is not valid because it does not exist. The specified parent commit ID does not exist in the specified branch of the repository.
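The variants above map one-to-one onto the service's documented failure modes. A minimal sketch of matching on the size- and content-related ones, assuming the blocking `.sync()` helper on `RusotoFuture`; the repository name, branch, and file path are hypothetical:

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, PutFileError, PutFileInput};
use rusoto_core::{Region, RusotoError};

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = PutFileInput {
        repository_name: "my-repo".to_owned(), // hypothetical repository
        branch_name: "master".to_owned(),
        file_path: "docs/NOTES.md".to_owned(),
        // Must be non-empty and under 6 MB; `.into()` converts to the generated blob type.
        file_content: b"hello\n".to_vec().into(),
        ..Default::default()
    };
    match client.put_file(input).sync() {
        Ok(output) => println!("created commit {}", output.commit_id),
        Err(RusotoError::Service(PutFileError::FileContentRequired(msg))) => {
            eprintln!("empty files cannot be added with this API: {}", msg);
        }
        Err(RusotoError::Service(PutFileError::FileContentSizeLimitExceeded(msg))) => {
            eprintln!("file too large; consider a Git client instead: {}", msg);
        }
        Err(other) => eprintln!("PutFile failed: {}", other),
    }
}
```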

ParentCommitDoesNotExist(String), @@ -7127,6 +10662,18 @@ impl Error for UpdateRepositoryNameError { } /// Trait representing the capabilities of the CodeCommit API. CodeCommit clients implement this trait. pub trait CodeCommit { + ///

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.
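A hedged sketch of driving this from the generated client, again assuming the blocking `.sync()` helper; the repository and branch specifiers are hypothetical, and `merge_option` must be a squash or three-way value, since a fast-forward merge has no conflicts to describe:

```rust
use rusoto_codecommit::{BatchDescribeMergeConflictsInput, CodeCommit, CodeCommitClient};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let output = client
        .batch_describe_merge_conflicts(BatchDescribeMergeConflictsInput {
            repository_name: "my-repo".to_owned(), // hypothetical
            source_commit_specifier: "feature".to_owned(),
            destination_commit_specifier: "master".to_owned(),
            merge_option: "THREE_WAY_MERGE".to_owned(),
            ..Default::default() // optional conflict-detail and resolution settings
        })
        .sync()
        .expect("BatchDescribeMergeConflicts failed");
    for conflict in output.conflicts {
        let path = conflict.conflict_metadata.and_then(|m| m.file_path);
        println!("conflicted file: {:?}", path);
    }
}
```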

+ fn batch_describe_merge_conflicts( + &self, + input: BatchDescribeMergeConflictsInput, + ) -> RusotoFuture; + + ///

Returns information about the contents of one or more commits in a repository.

+ fn batch_get_commits( + &self, + input: BatchGetCommitsInput, + ) -> RusotoFuture; + ///

Returns information about one or more repositories.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

fn batch_get_repositories( &self, @@ -7154,6 +10701,12 @@ pub trait CodeCommit { input: CreateRepositoryInput, ) -> RusotoFuture; + ///

Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy, as that strategy does not create a merge commit.

This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.
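A sketch of such a merge dry run under the same assumptions (hypothetical repository and branch names); the returned commit ID can then be fed to GetCommit, as the note above describes:

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, CreateUnreferencedMergeCommitInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    // Compute the merge result without moving any branch reference.
    let output = client
        .create_unreferenced_merge_commit(CreateUnreferencedMergeCommitInput {
            repository_name: "my-repo".to_owned(), // hypothetical
            source_commit_specifier: "feature".to_owned(),
            destination_commit_specifier: "master".to_owned(),
            merge_option: "THREE_WAY_MERGE".to_owned(), // fast-forward is not allowed here
            ..Default::default()
        })
        .sync()
        .expect("CreateUnreferencedMergeCommit failed");
    // The commit is unreferenced, so keep the ID to look it up later.
    println!("unreferenced merge commit: {:?}", output.commit_id);
}
```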

+ fn create_unreferenced_merge_commit( + &self, + input: CreateUnreferencedMergeCommitInput, + ) -> RusotoFuture; + ///

Deletes a branch from a repository, unless that branch is the default branch for the repository.

fn delete_branch( &self, @@ -7178,6 +10731,12 @@ pub trait CodeCommit { input: DeleteRepositoryInput, ) -> RusotoFuture; + ///

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception will be thrown.

+ fn describe_merge_conflicts( + &self, + input: DescribeMergeConflictsInput, + ) -> RusotoFuture; + ///

Returns information about one or more pull request events.

fn describe_pull_request_events( &self, @@ -7223,12 +10782,24 @@ pub trait CodeCommit { ///

Returns the contents of a specified folder in a repository.

fn get_folder(&self, input: GetFolderInput) -> RusotoFuture; + ///

Returns information about a specified merge commit.

+ fn get_merge_commit( + &self, + input: GetMergeCommitInput, + ) -> RusotoFuture; + ///

Returns information about merge conflicts between the before and after commit IDs for a pull request in a repository.

fn get_merge_conflicts( &self, input: GetMergeConflictsInput, ) -> RusotoFuture; + ///

Returns information about the merge options available for merging two specified branches. For details about why a particular merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.
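The natural flow is to query the options first and only attempt a strategy the service reports as available. A hedged sketch of that flow, with hypothetical repository and branch names:

```rust
use rusoto_codecommit::{
    CodeCommit, CodeCommitClient, GetMergeOptionsInput, MergeBranchesByFastForwardInput,
};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let options = client
        .get_merge_options(GetMergeOptionsInput {
            repository_name: "my-repo".to_owned(), // hypothetical
            source_commit_specifier: "feature".to_owned(),
            destination_commit_specifier: "master".to_owned(),
            ..Default::default()
        })
        .sync()
        .expect("GetMergeOptions failed");

    // Only attempt a fast-forward merge if the service reports it is possible.
    if options.merge_options.contains(&"FAST_FORWARD_MERGE".to_string()) {
        let merged = client
            .merge_branches_by_fast_forward(MergeBranchesByFastForwardInput {
                repository_name: "my-repo".to_owned(),
                source_commit_specifier: "feature".to_owned(),
                destination_commit_specifier: "master".to_owned(),
                target_branch: Some("master".to_owned()),
            })
            .sync()
            .expect("MergeBranchesByFastForward failed");
        println!("merge commit: {:?}", merged.commit_id);
    }
}
```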

+ fn get_merge_options( + &self, + input: GetMergeOptionsInput, + ) -> RusotoFuture; + ///

Gets information about a pull request in a specified repository.

fn get_pull_request( &self, @@ -7271,12 +10842,42 @@ pub trait CodeCommit { input: ListTagsForResourceInput, ) -> RusotoFuture; - ///

Closes a pull request and attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge option.

+ ///

Merges two branches using the fast-forward merge strategy.

+ fn merge_branches_by_fast_forward( + &self, + input: MergeBranchesByFastForwardInput, + ) -> RusotoFuture; + + ///

Merges two branches using the squash merge strategy.

+ fn merge_branches_by_squash( + &self, + input: MergeBranchesBySquashInput, + ) -> RusotoFuture; + + ///

Merges two specified branches using the three-way merge strategy.

+ fn merge_branches_by_three_way( + &self, + input: MergeBranchesByThreeWayInput, + ) -> RusotoFuture; + + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge strategy. If the merge is successful, it closes the pull request.

fn merge_pull_request_by_fast_forward( &self, input: MergePullRequestByFastForwardInput, ) -> RusotoFuture; + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the squash merge strategy. If the merge is successful, it closes the pull request.

+ fn merge_pull_request_by_squash( + &self, + input: MergePullRequestBySquashInput, + ) -> RusotoFuture; + + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the three-way merge strategy. If the merge is successful, it closes the pull request.
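The three merge_pull_request_by_* methods differ only in strategy and in the commit metadata they accept. A sketch of the squash variant, assuming the blocking `.sync()` helper; the pull request ID, repository, and author details are hypothetical:

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, MergePullRequestBySquashInput};
use rusoto_core::Region;

fn main() {
    let client = CodeCommitClient::new(Region::UsEast1);
    let input = MergePullRequestBySquashInput {
        pull_request_id: "42".to_owned(),      // hypothetical pull request ID
        repository_name: "my-repo".to_owned(), // hypothetical repository
        commit_message: Some("Squash-merge pull request 42".to_owned()),
        author_name: Some("Jane Doe".to_owned()),
        email: Some("jane@example.com".to_owned()),
        ..Default::default()
    };
    match client.merge_pull_request_by_squash(input).sync() {
        // On success the service also closes the pull request.
        Ok(output) => {
            let status = output.pull_request.and_then(|pr| pr.pull_request_status);
            println!("merged; pull request status: {:?}", status);
        }
        Err(e) => eprintln!("merge failed: {}", e),
    }
}
```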

+ fn merge_pull_request_by_three_way( + &self, + input: MergePullRequestByThreeWayInput, + ) -> RusotoFuture; + ///

Posts a comment on the comparison between two commits.

fn post_comment_for_compared_commit( &self, @@ -7370,10 +10971,7 @@ impl CodeCommitClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CodeCommitClient { - CodeCommitClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7387,14 +10985,76 @@ impl CodeCommitClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CodeCommitClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CodeCommitClient { + CodeCommitClient { client, region } } } impl CodeCommit for CodeCommitClient { + ///

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.

+ fn batch_describe_merge_conflicts( + &self, + input: BatchDescribeMergeConflictsInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.BatchDescribeMergeConflicts", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(BatchDescribeMergeConflictsError::from_response(response)) + })) + } + }) + } + + ///

Returns information about the contents of one or more commits in a repository.
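As the generated body below shows, every operation signs a POST to `/` with an `x-amz-target` header naming the operation and a serde_json payload. A hedged sketch of exercising that wiring with the workspace's rusoto_mock helpers; the canned response body is an assumed minimal shape:

```rust
use rusoto_codecommit::{BatchGetCommitsInput, CodeCommit, CodeCommitClient};
use rusoto_core::Region;
use rusoto_mock::{MockCredentialsProvider, MockRequestDispatcher};

#[test]
fn batch_get_commits_deserializes_empty_list() {
    // Canned 200 response standing in for the service.
    let dispatcher = MockRequestDispatcher::with_status(200).with_body(r#"{"commits": []}"#);
    let client = CodeCommitClient::new_with(dispatcher, MockCredentialsProvider, Region::UsEast1);
    let output = client
        .batch_get_commits(BatchGetCommitsInput {
            commit_ids: vec!["0123456789abcdef".to_owned()], // hypothetical commit ID
            repository_name: "my-repo".to_owned(),           // hypothetical repository
        })
        .sync()
        .expect("mocked call failed");
    assert_eq!(output.commits, Some(vec![]));
}
```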

+ fn batch_get_commits( + &self, + input: BatchGetCommitsInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "CodeCommit_20150413.BatchGetCommits"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(BatchGetCommitsError::from_response(response))), + ) + } + }) + } + ///

Returns information about one or more repositories.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

fn batch_get_repositories( &self, @@ -7533,6 +11193,35 @@ impl CodeCommit for CodeCommitClient { }) } + ///

Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy, as that strategy does not create a merge commit.

This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.

+ fn create_unreferenced_merge_commit( + &self, + input: CreateUnreferencedMergeCommitInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.CreateUnreferencedMergeCommit", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(CreateUnreferencedMergeCommitError::from_response(response)) + })) + } + }) + } + ///

Deletes a branch from a repository, unless that branch is the default branch for the repository.

fn delete_branch( &self, @@ -7648,6 +11337,34 @@ impl CodeCommit for CodeCommitClient { }) } + ///

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception will be thrown.

+ fn describe_merge_conflicts( + &self, + input: DescribeMergeConflictsInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "CodeCommit_20150413.DescribeMergeConflicts"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(DescribeMergeConflictsError::from_response(response)) + }), + ) + } + }) + } + ///

Returns information about one or more pull request events.

fn describe_pull_request_events( &self, @@ -7918,6 +11635,35 @@ impl CodeCommit for CodeCommitClient { }) } + ///

Returns information about a specified merge commit.

+ fn get_merge_commit( + &self, + input: GetMergeCommitInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "CodeCommit_20150413.GetMergeCommit"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMergeCommitError::from_response(response))), + ) + } + }) + } + ///

Returns information about merge conflicts between the before and after commit IDs for a pull request in a repository.

fn get_merge_conflicts( &self, @@ -7947,6 +11693,35 @@ impl CodeCommit for CodeCommitClient { }) } + ///

Returns information about the merge options available for merging two specified branches. For details about why a particular merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.

+ fn get_merge_options( + &self, + input: GetMergeOptionsInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "CodeCommit_20150413.GetMergeOptions"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMergeOptionsError::from_response(response))), + ) + } + }) + } + ///

Gets information about a pull request in a specified repository.

fn get_pull_request( &self, @@ -8148,7 +11923,93 @@ impl CodeCommit for CodeCommitClient { }) } - ///

Closes a pull request and attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge option.

+ ///

Merges two branches using the fast-forward merge strategy.

+ fn merge_branches_by_fast_forward( + &self, + input: MergeBranchesByFastForwardInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.MergeBranchesByFastForward", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(MergeBranchesByFastForwardError::from_response(response)) + })) + } + }) + } + + ///

Merges two branches using the squash merge strategy.

+ fn merge_branches_by_squash( + &self, + input: MergeBranchesBySquashInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "CodeCommit_20150413.MergeBranchesBySquash"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(MergeBranchesBySquashError::from_response(response)) + }), + ) + } + }) + } + + ///

Merges two specified branches using the three-way merge strategy.

+ fn merge_branches_by_three_way( + &self, + input: MergeBranchesByThreeWayInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.MergeBranchesByThreeWay", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(MergeBranchesByThreeWayError::from_response(response)) + })) + } + }) + } + + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge strategy. If the merge is successful, it closes the pull request.

fn merge_pull_request_by_fast_forward( &self, input: MergePullRequestByFastForwardInput, @@ -8177,6 +12038,64 @@ impl CodeCommit for CodeCommitClient { }) } + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the squash merge strategy. If the merge is successful, it closes the pull request.

+ fn merge_pull_request_by_squash( + &self, + input: MergePullRequestBySquashInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.MergePullRequestBySquash", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(MergePullRequestBySquashError::from_response(response)) + })) + } + }) + } + + ///

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the three-way merge strategy. If the merge is successful, it closes the pull request.

+ fn merge_pull_request_by_three_way( + &self, + input: MergePullRequestByThreeWayInput, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "codecommit", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "CodeCommit_20150413.MergePullRequestByThreeWay", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(MergePullRequestByThreeWayError::from_response(response)) + })) + } + }) + } + ///

Posts a comment on the comparison between two commits.

fn post_comment_for_compared_commit( &self, diff --git a/rusoto/services/codecommit/src/lib.rs b/rusoto/services/codecommit/src/lib.rs index 9c544ac989a..53c8927fd95 100644 --- a/rusoto/services/codecommit/src/lib.rs +++ b/rusoto/services/codecommit/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a new branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • DeleteFile, which deletes the content of a specified file from a specified branch.

  • GetFile, which returns the base-64 encoded content of a specified file.

  • GetFolder, which returns the contents of a specified folder or directory.

  • PutFile, which adds or modifies a file in a specified repository and branch.

Information about committed code in a repository, by calling the following:

  • CreateCommit, which creates a commit for changes to a repository.

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).

Pull requests, by calling the following:

Information about comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

  • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

  • UntagResource, which removes tags for a resource in AWS CodeCommit.

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

+//!

AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a new branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • DeleteFile, which deletes the content of a specified file from a specified branch.

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.

  • GetFile, which returns the base-64 encoded content of a specified file.

  • GetFolder, which returns the contents of a specified folder or directory.

  • PutFile, which adds or modifies a single file in a specified repository and branch.

Commits, by calling the following:

  • BatchGetCommits, which returns information about one or more commits in a repository.

  • CreateCommit, which creates a commit for changes to a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).

Merges, by calling the following:

  • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

  • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

  • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

  • GetMergeCommit, which returns information about the merge between a source and destination commit.

  • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

  • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

  • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

  • MergeBranchesBySquash, which merges two branches using the squash merge option.

  • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

Pull requests, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

  • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

  • UntagResource, which removes tags for a resource in AWS CodeCommit.

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
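A short getting-started sketch to complement the catalog above, assuming default credentials sourcing and the blocking `.sync()` helper:

```rust
use rusoto_codecommit::{CodeCommit, CodeCommitClient, ListRepositoriesInput};
use rusoto_core::Region;

fn main() {
    // Credentials are resolved through the default provider chain.
    let client = CodeCommitClient::new(Region::UsEast1);
    let output = client
        .list_repositories(ListRepositoriesInput::default())
        .sync()
        .expect("ListRepositories failed");
    for repo in output.repositories.unwrap_or_default() {
        println!("{:?}", repo.repository_name);
    }
}
```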

//! //! If you're using the service, you're probably looking for [CodeCommitClient](struct.CodeCommitClient.html) and [CodeCommit](trait.CodeCommit.html). diff --git a/rusoto/services/codedeploy/Cargo.toml b/rusoto/services/codedeploy/Cargo.toml index 0f29123c5ab..8d60eefe3c5 100644 --- a/rusoto/services/codedeploy/Cargo.toml +++ b/rusoto/services/codedeploy/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_codedeploy" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/codedeploy/README.md b/rusoto/services/codedeploy/README.md index 95a79de4a08..f75ee0c9c3f 100644 --- a/rusoto/services/codedeploy/README.md +++ b/rusoto/services/codedeploy/README.md @@ -23,9 +23,16 @@ To use `rusoto_codedeploy` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_codedeploy = "0.40.0" +rusoto_codedeploy = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/codedeploy/src/custom/mod.rs b/rusoto/services/codedeploy/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/codedeploy/src/custom/mod.rs +++ b/rusoto/services/codedeploy/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/codedeploy/src/generated.rs b/rusoto/services/codedeploy/src/generated.rs index df23a06ff8b..08245e0d746 100644 --- a/rusoto/services/codedeploy/src/generated.rs +++ b/rusoto/services/codedeploy/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -76,7 +75,7 @@ pub struct AppSpecContent { ///

Information about an application.
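The hunks in this file gate `derive(Serialize)` behind the new `serialize_structs` feature flagged in the Cargo.toml and README hunks above. A hedged sketch of what enabling it buys; it assumes the consumer adds the feature (`rusoto_codedeploy = { version = "0.41", features = ["serialize_structs"] }`) and depends on `serde_json`:

```rust
use rusoto_codedeploy::ApplicationInfo;

fn main() {
    let info = ApplicationInfo {
        application_name: Some("my-app".to_owned()), // hypothetical application
        ..Default::default()
    };
    // With `serialize_structs` enabled, output structs derive Serialize,
    // so responses can be re-encoded, e.g. for caching or snapshot tests.
    println!("{}", serde_json::to_string(&info).unwrap());
}
```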

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplicationInfo { ///

The application ID.

#[serde(rename = "applicationId")] @@ -119,7 +118,7 @@ pub struct AutoRollbackConfiguration { ///

Information about an Auto Scaling group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingGroup { ///

An Auto Scaling lifecycle event hook name.

#[serde(rename = "hook")] @@ -144,7 +143,7 @@ pub struct BatchGetApplicationRevisionsInput { ///

Represents the output of a BatchGetApplicationRevisions operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetApplicationRevisionsOutput { ///

The name of the application that corresponds to the revisions.

#[serde(rename = "applicationName")] @@ -170,7 +169,7 @@ pub struct BatchGetApplicationsInput { ///

Represents the output of a BatchGetApplications operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetApplicationsOutput { ///

Information about the applications.

#[serde(rename = "applicationsInfo")] @@ -191,7 +190,7 @@ pub struct BatchGetDeploymentGroupsInput { ///

Represents the output of a BatchGetDeploymentGroups operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetDeploymentGroupsOutput { ///

Information about the deployment groups.

#[serde(rename = "deploymentGroupsInfo")] @@ -216,7 +215,7 @@ pub struct BatchGetDeploymentInstancesInput { ///

Represents the output of a BatchGetDeploymentInstances operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetDeploymentInstancesOutput { ///

Information about errors that might have occurred during the API call.

#[serde(rename = "errorMessage")] @@ -241,7 +240,7 @@ pub struct BatchGetDeploymentTargetsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetDeploymentTargetsOutput { ///

A list of target objects for a deployment. Each target object contains details about the target, such as its status and lifecycle events. The type of the target objects depends on the deployment's compute platform.

  • EC2/On-premises: Each target object is an EC2 or on-premises instance.

  • AWS Lambda: The target object is a specific version of an AWS Lambda function.

  • Amazon ECS: The target object is an Amazon ECS service.

#[serde(rename = "deploymentTargets")] @@ -259,7 +258,7 @@ pub struct BatchGetDeploymentsInput { ///

Represents the output of a BatchGetDeployments operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetDeploymentsOutput { ///

Information about the deployments.

#[serde(rename = "deploymentsInfo")] @@ -277,7 +276,7 @@ pub struct BatchGetOnPremisesInstancesInput { ///

Represents the output of a BatchGetOnPremisesInstances operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetOnPremisesInstancesOutput { ///

Information about the on-premises instances.

#[serde(rename = "instanceInfos")] @@ -302,14 +301,14 @@ pub struct BlueGreenDeploymentConfiguration { pub terminate_blue_instances_on_deployment_success: Option, } -///

Information about whether instances in the original environment are terminated when a blue/green deployment is successful.

+///

Information about whether instances in the original environment are terminated when a blue/green deployment is successful. BlueInstanceTerminationOption does not apply to Lambda deployments.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BlueInstanceTerminationOption { ///

The action to take on instances in the original environment after a successful blue/green deployment.

  • TERMINATE: Instances are terminated after a specified wait time.

  • KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.

#[serde(rename = "action")] #[serde(skip_serializing_if = "Option::is_none")] pub action: Option, - ///

The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. The maximum setting is 2880 minutes (2 days).

+ ///

For an Amazon EC2 deployment, the number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.

For an Amazon ECS deployment, the number of minutes before deleting the original (blue) task set. During an Amazon ECS deployment, CodeDeploy shifts traffic from the original (blue) task set to a replacement (green) task set.

The maximum setting is 2880 minutes (2 days).
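A sketch of wiring these fields together; the one-hour wait is an arbitrary example value within the documented 2,880-minute ceiling:

```rust
use rusoto_codedeploy::{BlueGreenDeploymentConfiguration, BlueInstanceTerminationOption};

fn main() {
    // Terminate the original (blue) instances one hour after a successful cutover.
    let blue_green = BlueGreenDeploymentConfiguration {
        terminate_blue_instances_on_deployment_success: Some(BlueInstanceTerminationOption {
            action: Some("TERMINATE".to_owned()),
            termination_wait_time_in_minutes: Some(60), // must not exceed 2880 (2 days)
        }),
        ..Default::default()
    };
    // `blue_green` would be embedded in a CreateDeploymentGroupInput or
    // UpdateDeploymentGroupInput before calling the CodeDeploy client.
    println!("{:?}", blue_green);
}
```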

#[serde(rename = "terminationWaitTimeInMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub termination_wait_time_in_minutes: Option, @@ -345,7 +344,7 @@ pub struct CreateApplicationInput { ///

Represents the output of a CreateApplication operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApplicationOutput { ///

A unique application ID.

#[serde(rename = "applicationId")] @@ -375,7 +374,7 @@ pub struct CreateDeploymentConfigInput { ///

Represents the output of a CreateDeploymentConfig operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDeploymentConfigOutput { ///

A unique deployment configuration ID.

#[serde(rename = "deploymentConfigId")] @@ -455,7 +454,7 @@ pub struct CreateDeploymentGroupInput { ///

Represents the output of a CreateDeploymentGroup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDeploymentGroupOutput { ///

A unique deployment group ID.

#[serde(rename = "deploymentGroupId")] @@ -509,7 +508,7 @@ pub struct CreateDeploymentInput { ///

Represents the output of a CreateDeployment operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDeploymentOutput { ///

The unique ID of a deployment.

#[serde(rename = "deploymentId")] @@ -546,7 +545,7 @@ pub struct DeleteDeploymentGroupInput { ///

Represents the output of a DeleteDeploymentGroup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDeploymentGroupOutput { ///

If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group.

#[serde(rename = "hooksNotCleanedUp")] @@ -565,7 +564,7 @@ pub struct DeleteGitHubAccountTokenInput { ///

Represents the output of a DeleteGitHubAccountToken operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGitHubAccountTokenOutput { ///

The name of the GitHub account connection that was deleted.

#[serde(rename = "tokenName")] @@ -575,7 +574,7 @@ pub struct DeleteGitHubAccountTokenOutput { ///

Information about a deployment configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeploymentConfigInfo { ///

The destination platform type for the deployment (Lambda, Server, or ECS).

#[serde(rename = "computePlatform")] @@ -605,7 +604,7 @@ pub struct DeploymentConfigInfo { ///

Information about a deployment group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeploymentGroupInfo { ///

A list of alarms associated with the deployment group.

#[serde(rename = "alarmConfiguration")] @@ -695,7 +694,7 @@ pub struct DeploymentGroupInfo { ///

Information about a deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeploymentInfo { ///

Provides information about the results of a deployment, such as whether instances in the original environment in a blue/green deployment were not terminated.

#[serde(rename = "additionalDeploymentStatusInfo")] @@ -809,7 +808,7 @@ pub struct DeploymentInfo { ///

Information about the deployment status of the instances in the deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeploymentOverview { ///

The number of instances in the deployment in a failed state.

#[serde(rename = "Failed")] @@ -865,7 +864,7 @@ pub struct DeploymentStyle { ///

Information about the deployment target.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeploymentTarget { ///

The deployment type that is specific to the deployment's compute platform.

#[serde(rename = "deploymentTargetType")] @@ -895,7 +894,7 @@ pub struct DeregisterOnPremisesInstanceInput { ///

Diagnostic information about executable scripts that are part of a deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Diagnostics { ///

The associated error code:

  • Success: The specified script ran.

  • ScriptMissing: The specified script was not found in the specified location.

  • ScriptNotExecutable: The specified script is not a recognized executable file type.

  • ScriptTimedOut: The specified script did not finish running in the specified time period.

  • ScriptFailed: The specified script failed to run as expected.

  • UnknownError: The specified script did not run for an unknown reason.

#[serde(rename = "errorCode")] @@ -956,7 +955,7 @@ pub struct ECSService { ///

Information about the target of an Amazon ECS deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ECSTarget { ///

The unique ID of a deployment.

#[serde(rename = "deploymentId")] @@ -990,7 +989,7 @@ pub struct ECSTarget { ///

Information about a set of Amazon ECS tasks in an AWS CodeDeploy deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic. An AWS CodeDeploy application that uses the Amazon ECS compute platform deploys a containerized application in an Amazon ECS service as a task set.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ECSTaskSet { ///

The number of tasks in a task set. During a deployment that uses the Amazon ECS compute type, CodeDeploy instructs Amazon ECS to create a new task set and uses this value to determine how many tasks to create. After the updated task set is created, CodeDeploy shifts traffic to the new task set.

#[serde(rename = "desiredCount")] @@ -1037,7 +1036,7 @@ pub struct ELBInfo { ///

Information about a deployment error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorInformation { ///

For more information, see Error Codes for AWS CodeDeploy in the AWS CodeDeploy User Guide.

The error code:

  • APPLICATION_MISSING: The application was missing. This error code is most likely raised if the application is deleted after the deployment is created, but before it is started.

  • DEPLOYMENT_GROUP_MISSING: The deployment group was missing. This error code is most likely raised if the deployment group is deleted after the deployment is created, but before it is started.

  • HEALTH_CONSTRAINTS: The deployment failed on too many instances to be successfully deployed within the instance health constraints specified.

  • HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed within the instance health constraints specified.

  • IAM_ROLE_MISSING: The service role cannot be accessed.

  • IAM_ROLE_PERMISSIONS: The service role does not have the correct permissions.

  • INTERNAL_ERROR: There was an internal error.

  • NO_EC2_SUBSCRIPTION: The calling account is not subscribed to Amazon EC2.

  • NO_INSTANCES: No instances were specified, or no instances can be found.

  • OVER_MAX_INSTANCES: The maximum number of instances was exceeded.

  • THROTTLED: The operation was throttled because the calling account exceeded the throttling limits of one or more AWS services.

  • TIMEOUT: The deployment has timed out.

  • REVISION_MISSING: The revision ID was missing. This error code is most likely raised if the revision is deleted after the deployment is created, but before it is started.

#[serde(rename = "code")] @@ -1051,7 +1050,7 @@ pub struct ErrorInformation { ///

Information about an application revision.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenericRevisionInfo { ///

The deployment groups for which this is the current target revision.

#[serde(rename = "deploymentGroups")] @@ -1085,7 +1084,7 @@ pub struct GetApplicationInput { ///

Represents the output of a GetApplication operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApplicationOutput { ///

Information about the application.

#[serde(rename = "application")] @@ -1106,7 +1105,7 @@ pub struct GetApplicationRevisionInput { ///

Represents the output of a GetApplicationRevision operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetApplicationRevisionOutput { ///

The name of the application that corresponds to the revision.

#[serde(rename = "applicationName")] @@ -1132,7 +1131,7 @@ pub struct GetDeploymentConfigInput { ///

Represents the output of a GetDeploymentConfig operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentConfigOutput { ///

Information about the deployment configuration.

#[serde(rename = "deploymentConfigInfo")] @@ -1153,7 +1152,7 @@ pub struct GetDeploymentGroupInput { ///

Represents the output of a GetDeploymentGroup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentGroupOutput { ///

Information about the deployment group.

#[serde(rename = "deploymentGroupInfo")] @@ -1182,7 +1181,7 @@ pub struct GetDeploymentInstanceInput { ///

Represents the output of a GetDeploymentInstance operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentInstanceOutput { ///

Information about the instance.

#[serde(rename = "instanceSummary")] @@ -1192,7 +1191,7 @@ pub struct GetDeploymentInstanceOutput { ///

Represents the output of a GetDeployment operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentOutput { ///

Information about the deployment.

#[serde(rename = "deploymentInfo")] @@ -1213,7 +1212,7 @@ pub struct GetDeploymentTargetInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeploymentTargetOutput { ///

A deployment target that contains information about a deployment such as its status, lifecycle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).

#[serde(rename = "deploymentTarget")] @@ -1231,7 +1230,7 @@ pub struct GetOnPremisesInstanceInput { ///

Represents the output of a GetOnPremisesInstance operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOnPremisesInstanceOutput { ///

Information about the on-premises instance.

#[serde(rename = "instanceInfo")] @@ -1263,7 +1262,7 @@ pub struct GreenFleetProvisioningOption { ///

Information about an on-premises instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceInfo { ///

If the on-premises instance was deregistered, the time at which the on-premises instance was deregistered.

#[serde(rename = "deregisterTime")] @@ -1297,7 +1296,7 @@ pub struct InstanceInfo { ///

Information about an instance in a deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceSummary { ///

The unique ID of a deployment.

#[serde(rename = "deploymentId")] @@ -1327,7 +1326,7 @@ pub struct InstanceSummary { ///

A target Amazon EC2 or on-premises instance during a deployment that uses the EC2/On-premises compute platform.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceTarget { ///

The unique ID of a deployment.

#[serde(rename = "deploymentId")] @@ -1361,7 +1360,7 @@ pub struct InstanceTarget { ///

Information about a Lambda function specified in a deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionInfo { ///

The version of a Lambda function that production traffic points to.

#[serde(rename = "currentVersion")] @@ -1387,7 +1386,7 @@ pub struct LambdaFunctionInfo { ///

Information about the target AWS Lambda function during an AWS Lambda deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaTarget { ///

The unique ID of a deployment.

#[serde(rename = "deploymentId")] @@ -1421,7 +1420,7 @@ pub struct LambdaTarget { ///

Information about the most recent attempted or successful deployment to a deployment group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LastDeploymentInfo { ///

A timestamp that indicates when the most recent deployment to the deployment group started.

#[serde(rename = "createTime")] @@ -1443,7 +1442,7 @@ pub struct LastDeploymentInfo { ///

Information about a deployment lifecycle event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LifecycleEvent { ///

Diagnostic information about the deployment lifecycle event.

#[serde(rename = "diagnostics")] @@ -1501,7 +1500,7 @@ pub struct ListApplicationRevisionsInput { ///

Represents the output of a ListApplicationRevisions operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListApplicationRevisionsOutput { ///

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

#[serde(rename = "nextToken")] @@ -1524,7 +1523,7 @@ pub struct ListApplicationsInput { ///

Represents the output of a ListApplications operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListApplicationsOutput { ///

A list of application names.

#[serde(rename = "applications")] @@ -1547,7 +1546,7 @@ pub struct ListDeploymentConfigsInput { ///

Represents the output of a ListDeploymentConfigs operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeploymentConfigsOutput { ///

A list of deployment configurations, including built-in configurations such as CodeDeployDefault.OneAtATime.

#[serde(rename = "deploymentConfigsList")] @@ -1573,7 +1572,7 @@ pub struct ListDeploymentGroupsInput { ///

Represents the output of a ListDeploymentGroups operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeploymentGroupsOutput { ///

The application name.

#[serde(rename = "applicationName")] @@ -1611,7 +1610,7 @@ pub struct ListDeploymentInstancesInput { ///

Represents the output of a ListDeploymentInstances operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeploymentInstancesOutput { ///

A list of instance IDs.

#[serde(rename = "instancesList")] @@ -1633,14 +1632,14 @@ pub struct ListDeploymentTargetsInput { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

A key used to filter the returned targets.

+ ///

A key used to filter the returned targets. The two valid values are:

  • TargetStatus - A TargetStatus filter string can be Failed, InProgress, Pending, Ready, Skipped, Succeeded, or Unknown.

  • ServerInstanceLabel - A ServerInstanceLabel filter string can be Blue or Green.
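A sketch of building such a filter map from the two documented keys; the deployment ID is hypothetical:

```rust
use std::collections::HashMap;

use rusoto_codedeploy::ListDeploymentTargetsInput;

fn main() {
    let mut target_filters: HashMap<String, Vec<String>> = HashMap::new();
    // Only return targets that failed or are still in progress.
    target_filters.insert(
        "TargetStatus".to_owned(),
        vec!["Failed".to_owned(), "InProgress".to_owned()],
    );
    let input = ListDeploymentTargetsInput {
        deployment_id: Some("d-EXAMPLE123".to_owned()), // hypothetical deployment ID
        target_filters: Some(target_filters),
        ..Default::default()
    };
    // `input` would then be passed to CodeDeployClient::list_deployment_targets.
    let _ = input;
}
```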

#[serde(rename = "targetFilters")] #[serde(skip_serializing_if = "Option::is_none")] pub target_filters: Option<::std::collections::HashMap>>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeploymentTargetsOutput { ///

If a large amount of information is returned, a token identifier is also returned. It can be used in a subsequent ListDeploymentTargets call to return the next set of deployment targets in the list.

#[serde(rename = "nextToken")] @@ -1679,7 +1678,7 @@ pub struct ListDeploymentsInput { ///

Represents the output of a ListDeployments operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeploymentsOutput { ///

A list of deployment IDs.

#[serde(rename = "deployments")] @@ -1702,7 +1701,7 @@ pub struct ListGitHubAccountTokenNamesInput { ///

Represents the output of a ListGitHubAccountTokenNames operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGitHubAccountTokenNamesOutput { ///

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent ListGitHubAccountTokenNames call to return the next set of names in the list.

#[serde(rename = "nextToken")] @@ -1733,7 +1732,7 @@ pub struct ListOnPremisesInstancesInput { ///

Represents the output of the list on-premises instances operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOnPremisesInstancesOutput { ///

The list of matching on-premises instance names.

#[serde(rename = "instanceNames")] @@ -1757,7 +1756,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceOutput { ///

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

#[serde(rename = "NextToken")] @@ -1825,7 +1824,7 @@ pub struct PutLifecycleEventHookExecutionStatusInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutLifecycleEventHookExecutionStatusOutput { ///

The execution ID of the lifecycle event hook. A hook is specified in the hooks section of the deployment's AppSpec file.

#[serde(rename = "lifecycleEventHookExecutionId")] @@ -1890,7 +1889,7 @@ pub struct RemoveTagsFromOnPremisesInstancesInput { ///

Information about an application revision.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RevisionInfo { ///

Information about an application revision, including usage details and associated deployment groups.

#[serde(rename = "genericRevisionInfo")] @@ -1929,7 +1928,7 @@ pub struct RevisionLocation { ///

Information about a deployment rollback.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RollbackInfo { ///

The ID of the deployment rollback.

#[serde(rename = "rollbackDeploymentId")] @@ -1992,7 +1991,7 @@ pub struct StopDeploymentInput { ///

Represents the output of a StopDeployment operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopDeploymentOutput { ///

The status of the stop deployment operation:

  • Pending: The stop operation is pending.

  • Succeeded: The stop operation was successful.

#[serde(rename = "status")] @@ -2045,7 +2044,7 @@ pub struct TagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceOutput {} ///

Information about a target group in Elastic Load Balancing to use in a deployment. Instances are registered as targets in a target group, and traffic is routed to the target group.

@@ -2184,7 +2183,7 @@ pub struct UntagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceOutput {} ///

Represents the input of an UpdateApplication operation.

@@ -2273,7 +2272,7 @@ pub struct UpdateDeploymentGroupInput { ///

Represents the output of an UpdateDeploymentGroup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeploymentGroupOutput { ///

If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the AWS account. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the AWS account.

#[serde(rename = "hooksNotCleanedUp")] @@ -6342,10 +6341,7 @@ impl CodeDeployClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CodeDeployClient { - CodeDeployClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -6359,10 +6355,14 @@ impl CodeDeployClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CodeDeployClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CodeDeployClient { + CodeDeployClient { client, region } } } diff --git a/rusoto/services/codepipeline/Cargo.toml b/rusoto/services/codepipeline/Cargo.toml index 495ec18dd72..70b4438d821 100644 --- a/rusoto/services/codepipeline/Cargo.toml +++ b/rusoto/services/codepipeline/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_codepipeline" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/codepipeline/README.md b/rusoto/services/codepipeline/README.md index 0681de75303..26f7c5bcebc 100644 --- a/rusoto/services/codepipeline/README.md +++ b/rusoto/services/codepipeline/README.md @@ -23,9 +23,16 @@ To use `rusoto_codepipeline` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_codepipeline = "0.40.0" +rusoto_codepipeline = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/codepipeline/src/custom/mod.rs b/rusoto/services/codepipeline/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/codepipeline/src/custom/mod.rs +++ b/rusoto/services/codepipeline/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/codepipeline/src/generated.rs b/rusoto/services/codepipeline/src/generated.rs index b184b2c0f3e..8cab18cea1b 100644 --- a/rusoto/services/codepipeline/src/generated.rs +++ b/rusoto/services/codepipeline/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. 
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 /// Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AWSSessionCredentials {
     ///
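Since these session credentials arrive as plain strings (the fields follow in the next hunks), a job worker might adapt them into the general-purpose credentials type; a hedged sketch, assuming `AwsCredentials::new` keeps its four-argument signature from `rusoto_credential`:

```rust
use rusoto_codepipeline::AWSSessionCredentials;
use rusoto_core::credential::AwsCredentials;

// Sketch: adapt job-scoped STS credentials for reuse with other rusoto clients.
fn to_aws_credentials(session: AWSSessionCredentials) -> AwsCredentials {
    AwsCredentials::new(
        session.access_key_id,
        session.secret_access_key,
        Some(session.session_token),
        None, // no expiry is carried on this struct
    )
}
```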

The access key for the session.

#[serde(rename = "accessKeyId")] @@ -52,7 +51,7 @@ pub struct AcknowledgeJobInput { ///

Represents the output of an AcknowledgeJob action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcknowledgeJobOutput { ///

Whether the job worker has received the specified job.

#[serde(rename = "status")] @@ -76,7 +75,7 @@ pub struct AcknowledgeThirdPartyJobInput { ///

Represents the output of an AcknowledgeThirdPartyJob action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcknowledgeThirdPartyJobOutput { ///

The status information for the third party job, if any.

#[serde(rename = "status")] @@ -86,7 +85,7 @@ pub struct AcknowledgeThirdPartyJobOutput { ///

Represents information about an action configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionConfiguration { ///

The configuration data for the action.

#[serde(rename = "configuration")] @@ -125,7 +124,7 @@ pub struct ActionConfigurationProperty { ///

Represents the context of an action within the stage of a pipeline to a job worker.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionContext { ///

The system-generated unique ID that corresponds to an action's execution.

#[serde(rename = "actionExecutionId")] @@ -140,10 +139,10 @@ pub struct ActionContext { ///

 /// Represents information about an action declaration.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ActionDeclaration {
-    /// The configuration information for the action type.
+    /// Specifies the action type and the provider of the action.
     #[serde(rename = "actionTypeId")]
     pub action_type_id: ActionTypeId,
-    /// The action declaration's configuration.
+    /// The action's configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline. For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see Configuration Properties Reference in the AWS CloudFormation User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the AWS CloudFormation User Guide. The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows: JSON: "Configuration" : { Key : Value },
     #[serde(rename = "configuration")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub configuration: Option<::std::collections::HashMap<String, String>>,
@@ -174,7 +173,7 @@ pub struct ActionDeclaration {
 ///

Represents information about the run of an action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionExecution { ///

The details of an error returned by a URL external to AWS.

#[serde(rename = "errorDetails")] @@ -216,7 +215,7 @@ pub struct ActionExecution { ///

Returns information about an execution of an action, including the action execution ID, and the name, version, and timing of the action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionExecutionDetail { ///

The action execution ID.

#[serde(rename = "actionExecutionId")] @@ -271,7 +270,7 @@ pub struct ActionExecutionFilter { ///

Input information used for an action execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionExecutionInput { #[serde(rename = "actionTypeId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -296,7 +295,7 @@ pub struct ActionExecutionInput { ///

Output details listed for an action execution, such as the action execution result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionExecutionOutput { ///

Execution result information listed in the output details for an action execution.

#[serde(rename = "executionResult")] @@ -310,7 +309,7 @@ pub struct ActionExecutionOutput { ///

Execution result information, such as the external execution ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionExecutionResult { ///

The action provider's external ID for the action execution.

#[serde(rename = "externalExecutionId")] @@ -342,7 +341,7 @@ pub struct ActionRevision { ///

Represents information about the state of an action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionState { ///

The name of the action.

#[serde(rename = "actionName")] @@ -368,7 +367,7 @@ pub struct ActionState { ///

Returns information about the details of an action type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActionType { ///

The configuration properties for the action type.

#[serde(rename = "actionConfigurationProperties")] @@ -440,7 +439,7 @@ pub struct ApprovalResult { ///

Represents information about an artifact that will be worked upon by actions in the pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Artifact { ///

The location of an artifact.

#[serde(rename = "location")] @@ -458,7 +457,7 @@ pub struct Artifact { ///

Artifact details for the action execution, such as the artifact location.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArtifactDetail { ///

The artifact object name for the action execution.

#[serde(rename = "name")] @@ -483,7 +482,7 @@ pub struct ArtifactDetails { ///

Represents information about the location of an artifact.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArtifactLocation { ///

The Amazon S3 bucket that contains the artifact.

#[serde(rename = "s3Location")] @@ -497,7 +496,7 @@ pub struct ArtifactLocation { ///

Represents revision details of an artifact.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArtifactRevision { ///

The date and time when the most recent revision of the artifact was created, in timestamp format.

#[serde(rename = "created")] @@ -525,7 +524,7 @@ pub struct ArtifactRevision { pub revision_url: Option, } -///

The Amazon S3 bucket where artifacts are stored for the pipeline.

+///

The Amazon S3 bucket where artifacts are stored for the pipeline.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ArtifactStore { ///

The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.

@@ -585,7 +584,7 @@ pub struct CreateCustomActionTypeInput { ///

Represents the output of a CreateCustomActionType operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCustomActionTypeOutput { ///

Returns information about the details of an action type.

#[serde(rename = "actionType")] @@ -610,7 +609,7 @@ pub struct CreatePipelineInput { ///

Represents the output of a CreatePipeline action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePipelineOutput { ///

Represents the structure of actions and stages to be performed in the pipeline.

#[serde(rename = "pipeline")] @@ -671,7 +670,7 @@ pub struct DeleteWebhookInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWebhookOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -683,7 +682,7 @@ pub struct DeregisterWebhookWithThirdPartyInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterWebhookWithThirdPartyOutput {} ///

Represents the input of a DisableStageTransition action.

@@ -720,7 +719,7 @@ pub struct EnableStageTransitionInput { ///

 /// Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct EncryptionKey {
-    /// The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
+    /// The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN. Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.
     #[serde(rename = "id")]
     pub id: String,
     /// The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
@@ -730,7 +729,7 @@ pub struct EncryptionKey {
 ///
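A sketch of the doc change in practice: an alias ARN is now an accepted `id`, but only within the CMK's own account. The ARN and account number below are invented:

```rust
use rusoto_codepipeline::EncryptionKey;

fn alias_key() -> EncryptionKey {
    EncryptionKey {
        // Alias ARNs are recognized only in the account that created the CMK.
        id: "arn:aws:kms:us-east-1:111122223333:alias/example".to_string(),
        type_: "KMS".to_string(), // `type` is reserved, so the field is `type_`
    }
}
```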

Represents information about an error in AWS CodePipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorDetails { ///

The system ID or error number code of the error.

#[serde(rename = "code")] @@ -759,6 +758,20 @@ pub struct ExecutionDetails { pub summary: Option, } +///

The interaction or event that started a pipeline execution.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ExecutionTrigger { + ///

Detail related to the event that started a pipeline execution, such as the webhook ARN of the webhook that triggered the pipeline execution or the user ARN for a user-initiated start-pipeline-execution CLI command.

+ #[serde(rename = "triggerDetail")] + #[serde(skip_serializing_if = "Option::is_none")] + pub trigger_detail: Option, + ///

The type of change-detection method, command, or user interaction that started a pipeline execution.

+ #[serde(rename = "triggerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub trigger_type: Option, +} + ///

Represents information about failure details.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct FailureDetails { @@ -784,7 +797,7 @@ pub struct GetJobDetailsInput { ///

Represents the output of a GetJobDetails action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobDetailsOutput { ///

The details of the job.

If AWSSessionCredentials is used, a long-running job can call GetJobDetails again to obtain new credentials.

#[serde(rename = "jobDetails")] @@ -805,7 +818,7 @@ pub struct GetPipelineExecutionInput { ///

Represents the output of a GetPipelineExecution action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPipelineExecutionOutput { ///

Represents information about the execution of a pipeline.

#[serde(rename = "pipelineExecution")] @@ -827,7 +840,7 @@ pub struct GetPipelineInput { ///

Represents the output of a GetPipeline action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPipelineOutput { ///

Represents the pipeline metadata information returned as part of the output of a GetPipeline action.

#[serde(rename = "metadata")] @@ -849,7 +862,7 @@ pub struct GetPipelineStateInput { ///

Represents the output of a GetPipelineState action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPipelineStateOutput { ///

The date and time the pipeline was created, in timestamp format.

#[serde(rename = "created")] @@ -886,7 +899,7 @@ pub struct GetThirdPartyJobDetailsInput { ///

Represents the output of a GetThirdPartyJobDetails action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetThirdPartyJobDetailsOutput { ///

The details of the job, including any protected values defined for the job.

#[serde(rename = "jobDetails")] @@ -904,7 +917,7 @@ pub struct InputArtifact { ///

Represents information about a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Job { ///

The ID of the AWS account to use when performing the job.

#[serde(rename = "accountId")] @@ -926,7 +939,7 @@ pub struct Job { ///

Represents additional information about a job required for a job worker to complete the job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobData { ///

Represents information about an action configuration.

#[serde(rename = "actionConfiguration")] @@ -964,7 +977,7 @@ pub struct JobData { ///

Represents information about the details of a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobDetails { ///

The AWS account ID associated with the job.

#[serde(rename = "accountId")] @@ -1000,7 +1013,7 @@ pub struct ListActionExecutionsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActionExecutionsOutput { ///

The details for a list of recent executions, such as action execution ID.

#[serde(rename = "actionExecutionDetails")] @@ -1027,7 +1040,7 @@ pub struct ListActionTypesInput { ///

Represents the output of a ListActionTypes action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActionTypesOutput { ///

Provides details of the action types.

#[serde(rename = "actionTypes")] @@ -1056,7 +1069,7 @@ pub struct ListPipelineExecutionsInput { ///

Represents the output of a ListPipelineExecutions action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPipelineExecutionsOutput { ///

A token that can be used in the next ListPipelineExecutions call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.

#[serde(rename = "nextToken")] @@ -1079,7 +1092,7 @@ pub struct ListPipelinesInput { ///

Represents the output of a ListPipelines action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPipelinesOutput { ///

If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list pipelines call to return the next set of pipelines in the list.

#[serde(rename = "nextToken")] @@ -1107,7 +1120,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceOutput { ///

If the amount of returned information is significantly large, an identifier is also returned and can be used in a subsequent API call to return the next page of the list. However, the ListTagsforResource call lists all available tags in one call and does not use pagination.

#[serde(rename = "nextToken")] @@ -1121,7 +1134,7 @@ pub struct ListTagsForResourceOutput { ///

The detail returned for each webhook after listing webhooks, such as the webhook URL, the webhook name, and the webhook ARN.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWebhookItem { ///

The Amazon Resource Name (ARN) of the webhook.

#[serde(rename = "arn")] @@ -1164,7 +1177,7 @@ pub struct ListWebhooksInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWebhooksOutput { ///

If the amount of returned information is significantly large, an identifier is also returned and can be used in a subsequent ListWebhooks call to return the next set of webhooks in the list.

#[serde(rename = "NextToken")] @@ -1186,7 +1199,7 @@ pub struct OutputArtifact { ///

Represents information about a pipeline to a job worker.

PipelineContext contains pipelineArn and pipelineExecutionId for custom action jobs. The pipelineArn and pipelineExecutionId fields are not populated for ThirdParty action jobs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineContext { ///

The context of an action to a job worker within the stage of a pipeline.

#[serde(rename = "action")] @@ -1213,11 +1226,11 @@ pub struct PipelineContext { ///

 /// Represents the structure of actions and stages to be performed in the pipeline.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct PipelineDeclaration {
-    /// Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.
+    /// Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline. You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.
     #[serde(rename = "artifactStore")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub artifact_store: Option<ArtifactStore>,
-    /// A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You can only use either artifactStore or artifactStores, not both. If you create a cross-region action in your pipeline, you must use artifactStores.
+    /// A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.
     #[serde(rename = "artifactStores")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub artifact_stores: Option<::std::collections::HashMap<String, ArtifactStore>>,
@@ -1238,7 +1251,7 @@ pub struct PipelineDeclaration {
 ///

Represents information about an execution of a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineExecution { ///

A list of ArtifactRevision objects included in a pipeline execution.

#[serde(rename = "artifactRevisions")] @@ -1264,7 +1277,7 @@ pub struct PipelineExecution { ///

Summary information about a pipeline execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineExecutionSummary { ///

The date and time of the last change to the pipeline execution, in timestamp format.

#[serde(rename = "lastUpdateTime")] @@ -1286,11 +1299,15 @@ pub struct PipelineExecutionSummary { #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, + ///

The interaction or event that started a pipeline execution, such as automated change detection or a StartPipelineExecution API call.

+ #[serde(rename = "trigger")] + #[serde(skip_serializing_if = "Option::is_none")] + pub trigger: Option, } ///

Information about a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineMetadata { ///

The date and time the pipeline was created, in timestamp format.

#[serde(rename = "created")] @@ -1308,7 +1325,7 @@ pub struct PipelineMetadata { ///

Returns a summary of a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineSummary { ///

The date and time the pipeline was created, in timestamp format.

#[serde(rename = "created")] @@ -1346,7 +1363,7 @@ pub struct PollForJobsInput { ///

Represents the output of a PollForJobs action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PollForJobsOutput { ///

Information about the jobs to take action on.

#[serde(rename = "jobs")] @@ -1368,7 +1385,7 @@ pub struct PollForThirdPartyJobsInput { ///

Represents the output of a PollForThirdPartyJobs action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PollForThirdPartyJobsOutput { ///

Information about the jobs to take action on.

#[serde(rename = "jobs")] @@ -1395,7 +1412,7 @@ pub struct PutActionRevisionInput { ///

Represents the output of a PutActionRevision action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutActionRevisionOutput { ///

Indicates whether the artifact revision was previously used in an execution of the specified pipeline.

#[serde(rename = "newRevision")] @@ -1429,7 +1446,7 @@ pub struct PutApprovalResultInput { ///

Represents the output of a PutApprovalResult action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutApprovalResultOutput { ///

The timestamp showing when the approval or rejection was submitted.

#[serde(rename = "approvedAt")] @@ -1517,7 +1534,7 @@ pub struct PutWebhookInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutWebhookOutput { ///

The detail returned from creating the webhook, such as the webhook name, webhook URL, and webhook ARN.

#[serde(rename = "webhook")] @@ -1534,7 +1551,7 @@ pub struct RegisterWebhookWithThirdPartyInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterWebhookWithThirdPartyOutput {} ///

Represents the input of a RetryStageExecution action.

@@ -1556,7 +1573,7 @@ pub struct RetryStageExecutionInput { ///

Represents the output of a RetryStageExecution action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RetryStageExecutionOutput { ///

The ID of the current workflow execution in the failed stage.

#[serde(rename = "pipelineExecutionId")] @@ -1566,7 +1583,7 @@ pub struct RetryStageExecutionOutput { ///

The location of the Amazon S3 bucket that contains a revision.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct S3ArtifactLocation { ///

The name of the Amazon S3 bucket.

#[serde(rename = "bucketName")] @@ -1578,7 +1595,7 @@ pub struct S3ArtifactLocation { ///

The Amazon S3 artifact location for an action's artifacts.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct S3Location { ///

The Amazon S3 artifact bucket for an action's artifacts.

#[serde(rename = "bucket")] @@ -1592,7 +1609,7 @@ pub struct S3Location { ///

Information about the version (or revision) of a source artifact that initiated a pipeline execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SourceRevision { ///

The name of the action that processed the revision to the source artifact.

#[serde(rename = "actionName")] @@ -1613,7 +1630,7 @@ pub struct SourceRevision { ///

Represents information about a stage to a job worker.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StageContext { ///

The name of the stage.

#[serde(rename = "name")] @@ -1638,7 +1655,7 @@ pub struct StageDeclaration { ///

Represents information about the run of a stage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StageExecution { ///

The ID of the pipeline execution associated with the stage.

#[serde(rename = "pipelineExecutionId")] @@ -1650,7 +1667,7 @@ pub struct StageExecution { ///

Represents information about the state of the stage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StageState { ///

The state of the stage.

#[serde(rename = "actionStates")] @@ -1684,7 +1701,7 @@ pub struct StartPipelineExecutionInput { ///

Represents the output of a StartPipelineExecution action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartPipelineExecutionOutput { ///

The unique system-generated ID of the pipeline execution that was started.

#[serde(rename = "pipelineExecutionId")] @@ -1714,12 +1731,12 @@ pub struct TagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceOutput {} ///

A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked upon by a partner action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThirdPartyJob { ///

The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

#[serde(rename = "clientId")] @@ -1733,7 +1750,7 @@ pub struct ThirdPartyJob { ///

Represents information about the job data for a partner action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThirdPartyJobData { ///

Represents information about an action configuration.

#[serde(rename = "actionConfiguration")] @@ -1771,7 +1788,7 @@ pub struct ThirdPartyJobData { ///

The details of a job sent in response to a GetThirdPartyJobDetails request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThirdPartyJobDetails { ///

The data to be returned by the third party job worker.

#[serde(rename = "data")] @@ -1789,7 +1806,7 @@ pub struct ThirdPartyJobDetails { ///

Represents information about the state of transitions between one stage and another stage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransitionState { ///

The user-specified reason why the transition between two stages of a pipeline was disabled.

#[serde(rename = "disabledReason")] @@ -1820,7 +1837,7 @@ pub struct UntagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceOutput {} ///

Represents the input of an UpdatePipeline action.

@@ -1833,7 +1850,7 @@ pub struct UpdatePipelineInput { ///

Represents the output of an UpdatePipeline action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePipelineOutput { ///

The structure of the updated pipeline.

#[serde(rename = "pipeline")] @@ -3539,7 +3556,7 @@ pub trait CodePipeline { input: CreateCustomActionTypeInput, ) -> RusotoFuture; - ///

Creates a pipeline.

+ ///

Creates a pipeline.

In the pipeline structure, you must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

fn create_pipeline( &self, input: CreatePipelineInput, @@ -3746,10 +3763,7 @@ impl CodePipelineClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CodePipelineClient { - CodePipelineClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3763,10 +3777,14 @@ impl CodePipelineClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CodePipelineClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CodePipelineClient { + CodePipelineClient { client, region } } } @@ -3860,7 +3878,7 @@ impl CodePipeline for CodePipelineClient { }) } - ///

Creates a pipeline.

+ ///

Creates a pipeline.

In the pipeline structure, you must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

fn create_pipeline( &self, input: CreatePipelineInput, diff --git a/rusoto/services/codestar/Cargo.toml b/rusoto/services/codestar/Cargo.toml index e927c7b6450..e815d07afa1 100644 --- a/rusoto/services/codestar/Cargo.toml +++ b/rusoto/services/codestar/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_codestar" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/codestar/README.md b/rusoto/services/codestar/README.md index 613d532580b..36b48d6f793 100644 --- a/rusoto/services/codestar/README.md +++ b/rusoto/services/codestar/README.md @@ -23,9 +23,16 @@ To use `rusoto_codestar` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_codestar = "0.40.0" +rusoto_codestar = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/codestar/src/custom/mod.rs b/rusoto/services/codestar/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/codestar/src/custom/mod.rs +++ b/rusoto/services/codestar/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/codestar/src/generated.rs b/rusoto/services/codestar/src/generated.rs index 03e98883c88..22588ff04e1 100644 --- a/rusoto/services/codestar/src/generated.rs +++ b/rusoto/services/codestar/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -46,7 +45,7 @@ pub struct AssociateTeamMemberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateTeamMemberResult { ///
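For consumers, `serialize_structs` is opt-in per service crate; a hedged example of what the corresponding dependency entry in an application's Cargo.toml could look like:

```toml
[dependencies]
rusoto_codestar = { version = "0.41.0", features = ["serialize_structs"] }
```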

The user- or system-generated token from the initial request that can be used to repeat the request.

#[serde(rename = "clientRequestToken")] @@ -125,7 +124,7 @@ pub struct CreateProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProjectResult { ///

The Amazon Resource Name (ARN) of the created project.

#[serde(rename = "arn")] @@ -161,7 +160,7 @@ pub struct CreateUserProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserProfileResult { ///

The date the user profile was created, in timestamp format.

#[serde(rename = "createdTimestamp")] @@ -204,7 +203,7 @@ pub struct DeleteProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProjectResult { ///

The Amazon Resource Name (ARN) of the deleted project.

#[serde(rename = "projectArn")] @@ -224,7 +223,7 @@ pub struct DeleteUserProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserProfileResult { ///

The Amazon Resource Name (ARN) of the user deleted from AWS CodeStar.

#[serde(rename = "userArn")] @@ -239,7 +238,7 @@ pub struct DescribeProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProjectResult { ///

The Amazon Resource Name (ARN) for the project.

#[serde(rename = "arn")] @@ -287,7 +286,7 @@ pub struct DescribeUserProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserProfileResult { ///

The date and time when the user profile was created in AWS CodeStar, in timestamp format.

#[serde(rename = "createdTimestamp")] @@ -323,7 +322,7 @@ pub struct DisassociateTeamMemberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateTeamMemberResult {} ///

Information about the GitHub repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.

@@ -366,7 +365,7 @@ pub struct ListProjectsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProjectsResult { ///

The continuation token to use when requesting the next set of results, if there are more results to be returned.

#[serde(rename = "nextToken")] @@ -393,7 +392,7 @@ pub struct ListResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourcesResult { ///

The continuation token to use when requesting the next set of results, if there are more results to be returned.

#[serde(rename = "nextToken")] @@ -421,7 +420,7 @@ pub struct ListTagsForProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForProjectResult { ///

Reserved for future use.

#[serde(rename = "nextToken")] @@ -449,7 +448,7 @@ pub struct ListTeamMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTeamMembersResult { ///

The continuation token to use when requesting the next set of results, if there are more results to be returned.

#[serde(rename = "nextToken")] @@ -473,7 +472,7 @@ pub struct ListUserProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUserProfilesResult { ///

The continuation token to use when requesting the next set of results, if there are more results to be returned.

#[serde(rename = "nextToken")] @@ -486,7 +485,7 @@ pub struct ListUserProfilesResult { ///

An indication of whether a project creation or deletion is failed or successful.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectStatus { ///

In the case of a project creation or deletion failure, a reason for the failure.

#[serde(rename = "reason")] @@ -499,7 +498,7 @@ pub struct ProjectStatus { ///

Information about the metadata for a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectSummary { ///

The Amazon Resource Name (ARN) of the project.

#[serde(rename = "projectArn")] @@ -513,7 +512,7 @@ pub struct ProjectSummary { ///

Information about a resource for a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///

The Amazon Resource Name (ARN) of the resource.

#[serde(rename = "id")] @@ -544,7 +543,7 @@ pub struct TagProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagProjectResult { ///

The tags for the project.

#[serde(rename = "tags")] @@ -554,7 +553,7 @@ pub struct TagProjectResult { ///

Information about a team member in a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TeamMember { ///

The role assigned to the user in the project. Project roles have different levels of access. For more information, see Working with Teams in the AWS CodeStar User Guide.

#[serde(rename = "projectRole")] @@ -603,7 +602,7 @@ pub struct UntagProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagProjectResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -622,7 +621,7 @@ pub struct UpdateProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProjectResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -644,7 +643,7 @@ pub struct UpdateTeamMemberRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTeamMemberResult { ///

The project role granted to the user.

#[serde(rename = "projectRole")] @@ -680,7 +679,7 @@ pub struct UpdateUserProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserProfileResult { ///

The date the user profile was created, in timestamp format.

#[serde(rename = "createdTimestamp")] @@ -709,7 +708,7 @@ pub struct UpdateUserProfileResult { ///

Information about a user's profile in AWS CodeStar.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserProfileSummary { ///

The display name of a user in AWS CodeStar. For example, this could be set to both first and last name ("Mary Major") or a single name ("Mary"). The display name is also used to generate the initial icon associated with the user in AWS CodeStar projects. If spaces are included in the display name, the first character that appears after the space will be used as the second character in the user initial icon. The initial icon displays a maximum of two characters, so a display name with more than one space (for example "Mary Jane Major") would generate an initial icon using the first character and the first character after the space ("MJ", not "MM").

#[serde(rename = "displayName")] @@ -1637,10 +1636,7 @@ impl CodeStarClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CodeStarClient { - CodeStarClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1654,10 +1650,14 @@ impl CodeStarClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CodeStarClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CodeStarClient { + CodeStarClient { client, region } } } diff --git a/rusoto/services/cognito-identity/Cargo.toml b/rusoto/services/cognito-identity/Cargo.toml index c6d0098e324..157ae040949 100644 --- a/rusoto/services/cognito-identity/Cargo.toml +++ b/rusoto/services/cognito-identity/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cognito_identity" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cognito-identity/README.md b/rusoto/services/cognito-identity/README.md index 7e5e414a765..65885027a65 100644 --- a/rusoto/services/cognito-identity/README.md +++ b/rusoto/services/cognito-identity/README.md @@ -23,9 +23,16 @@ To use `rusoto_cognito_identity` in your application, add it as a dependency in ```toml [dependencies] -rusoto_cognito_identity = "0.40.0" +rusoto_cognito_identity = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cognito-identity/src/custom/mod.rs b/rusoto/services/cognito-identity/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cognito-identity/src/custom/mod.rs +++ b/rusoto/services/cognito-identity/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cognito-identity/src/generated.rs b/rusoto/services/cognito-identity/src/generated.rs index 808ad1883b4..f5dbe1013b5 100644 --- a/rusoto/services/cognito-identity/src/generated.rs +++ b/rusoto/services/cognito-identity/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -78,7 +77,7 @@ pub struct CreateIdentityPoolInput { ///
Credentials for the provided identity ID.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Credentials { ///
The Access Key portion of the credentials.
#[serde(rename = "AccessKeyId")] @@ -108,7 +107,7 @@ pub struct DeleteIdentitiesInput { ///
Returned in response to a successful DeleteIdentities operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteIdentitiesResponse { ///
An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.
#[serde(rename = "UnprocessedIdentityIds")] @@ -158,7 +157,7 @@ pub struct GetCredentialsForIdentityInput { ///
Returned in response to a successful GetCredentialsForIdentity operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCredentialsForIdentityResponse { ///
Credentials for the provided identity ID.
#[serde(rename = "Credentials")] @@ -188,7 +187,7 @@ pub struct GetIdInput { ///
Returned in response to a GetId request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIdResponse { ///
A unique identifier in the format REGION:GUID.
#[serde(rename = "IdentityId")] @@ -206,7 +205,7 @@ pub struct GetIdentityPoolRolesInput { ///
Returned in response to a successful GetIdentityPoolRoles operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIdentityPoolRolesResponse { ///
An identity pool ID in the format REGION:GUID.
#[serde(rename = "IdentityPoolId")] @@ -243,7 +242,7 @@ pub struct GetOpenIdTokenForDeveloperIdentityInput { ///
Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOpenIdTokenForDeveloperIdentityResponse { ///
A unique identifier in the format REGION:GUID.
#[serde(rename = "IdentityId")] @@ -269,7 +268,7 @@ pub struct GetOpenIdTokenInput { ///
Returned in response to a successful GetOpenIdToken request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOpenIdTokenResponse { ///
A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input.
#[serde(rename = "IdentityId")] @@ -283,7 +282,7 @@ pub struct GetOpenIdTokenResponse { ///
A description of the identity.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IdentityDescription { ///
Date on which the identity was created.
#[serde(rename = "CreationDate")] @@ -343,7 +342,7 @@ pub struct IdentityPool { ///
A description of the identity pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IdentityPoolShortDescription { ///
An identity pool ID in the format REGION:GUID.
#[serde(rename = "IdentityPoolId")] @@ -376,7 +375,7 @@ pub struct ListIdentitiesInput { ///
The response to a ListIdentities request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIdentitiesResponse { ///
An object containing a set of identities and associated mappings.
#[serde(rename = "Identities")] @@ -406,7 +405,7 @@ pub struct ListIdentityPoolsInput { ///
The result of a successful ListIdentityPools action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIdentityPoolsResponse { ///
The identity pools returned by the ListIdentityPools action.
#[serde(rename = "IdentityPools")] @@ -426,7 +425,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///
The tags that are assigned to the identity pool.
#[serde(rename = "Tags")] @@ -460,7 +459,7 @@ pub struct LookupDeveloperIdentityInput { ///
Returned in response to a successful LookupDeveloperIdentity action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LookupDeveloperIdentityResponse { ///
This is the list of developer user identifiers associated with an identity ID. Cognito supports the association of multiple developer user identifiers with an identity ID.
#[serde(rename = "DeveloperUserIdentifierList")] @@ -512,7 +511,7 @@ pub struct MergeDeveloperIdentitiesInput { ///
Returned in response to a successful MergeDeveloperIdentities action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MergeDeveloperIdentitiesResponse { ///
A unique identifier in the format REGION:GUID.
#[serde(rename = "IdentityId")] @@ -571,7 +570,7 @@ pub struct TagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///
Input to the UnlinkDeveloperIdentity action.
@@ -607,7 +606,7 @@ pub struct UnlinkIdentityInput { ///
An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedIdentityId { ///
The error code indicating the type of error that occurred.
#[serde(rename = "ErrorCode")] @@ -631,7 +630,7 @@ pub struct UntagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} /// Errors returned by CreateIdentityPool @@ -2190,10 +2189,7 @@ impl CognitoIdentityClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CognitoIdentityClient { - CognitoIdentityClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2207,10 +2203,14 @@ impl CognitoIdentityClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CognitoIdentityClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CognitoIdentityClient { + CognitoIdentityClient { client, region } } } diff --git a/rusoto/services/cognito-idp/Cargo.toml b/rusoto/services/cognito-idp/Cargo.toml index e9eb0824da9..eb886d6c032 100644 --- a/rusoto/services/cognito-idp/Cargo.toml +++ b/rusoto/services/cognito-idp/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cognito_idp" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cognito-idp/README.md b/rusoto/services/cognito-idp/README.md index 4941d8c9289..f6c875ee0de 100644 --- a/rusoto/services/cognito-idp/README.md +++ b/rusoto/services/cognito-idp/README.md @@ -23,9 +23,16 @@ To use `rusoto_cognito_idp` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_cognito_idp = "0.40.0" +rusoto_cognito_idp = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cognito-idp/src/custom/mod.rs b/rusoto/services/cognito-idp/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cognito-idp/src/custom/mod.rs +++ b/rusoto/services/cognito-idp/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cognito-idp/src/generated.rs b/rusoto/services/cognito-idp/src/generated.rs index 8756b5128c1..e7986a8e963 100644 --- a/rusoto/services/cognito-idp/src/generated.rs +++ b/rusoto/services/cognito-idp/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
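The `serialize_structs` feature introduced in the Cargo.toml and README hunks above widens the previously test-only `derive(Serialize)` on output structs to library consumers. A hedged sketch of the effect, assuming the feature is enabled (for example `rusoto_cognito_idp = { version = "0.41.0", features = ["serialize_structs"] }`) and `serde_json` is a dependency of the consuming crate:

```rust
use rusoto_cognito_idp::GetUserResponse;

// GetUserResponse is one of the generated output structs that gains
// `derive(Serialize)` when the `serialize_structs` feature is on.
fn to_json(response: &GetUserResponse) -> serde_json::Result<String> {
    // Without the feature this does not compile: Serialize used to be
    // derived only under cfg(test).
    serde_json::to_string_pretty(response)
}
```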
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -77,7 +76,7 @@ pub struct AddCustomAttributesRequest { ///
Represents the response from the server for the request to add custom attributes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddCustomAttributesResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -106,7 +105,7 @@ pub struct AdminConfirmSignUpRequest { ///
Represents the response from the server for the request to confirm registration.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminConfirmSignUpResponse {} ///
The configuration for creating a new user profile.
@@ -163,7 +162,7 @@ pub struct AdminCreateUserRequest { ///
Represents the response from the server to the request to create the user.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminCreateUserResponse { ///
The newly created user.
#[serde(rename = "User")] @@ -187,7 +186,7 @@ pub struct AdminDeleteUserAttributesRequest { ///
Represents the response received from the server for a request to delete user attributes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminDeleteUserAttributesResponse {} ///
Represents the request to delete a user as an administrator.
@@ -212,7 +211,7 @@ pub struct AdminDisableProviderForUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminDisableProviderForUserResponse {} ///
Represents the request to disable any user as an administrator.
@@ -228,7 +227,7 @@ pub struct AdminDisableUserRequest { ///
Represents the response received from the server to disable the user as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminDisableUserResponse {} ///
Represents the request that enables the user as an administrator.
@@ -244,7 +243,7 @@ pub struct AdminEnableUserRequest { ///
Represents the response from the server for the request to enable a user as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminEnableUserResponse {} ///
Sends the forgot device request, as an administrator.
@@ -277,7 +276,7 @@ pub struct AdminGetDeviceRequest { ///
Gets the device response, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminGetDeviceResponse { ///
The device.
#[serde(rename = "Device")] @@ -297,7 +296,7 @@ pub struct AdminGetUserRequest { ///
Represents the response from the server from the request to get the specified user as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminGetUserResponse { ///
Indicates that the status is enabled.
#[serde(rename = "Enabled")] @@ -368,7 +367,7 @@ pub struct AdminInitiateAuthRequest { ///
Initiates the authentication response, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminInitiateAuthResponse { ///
The result of the authentication response. This is only returned if the caller does not need to pass another challenge. If the caller does need to pass another challenge before it gets tokens, ChallengeName, ChallengeParameters, and Session are returned.
#[serde(rename = "AuthenticationResult")] @@ -402,7 +401,7 @@ pub struct AdminLinkProviderForUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminLinkProviderForUserResponse {} ///
Represents the request to list devices, as an administrator.
@@ -426,7 +425,7 @@ pub struct AdminListDevicesRequest { ///
Lists the device's response, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminListDevicesResponse { ///
The devices in the list of devices response.
#[serde(rename = "Devices")] @@ -457,7 +456,7 @@ pub struct AdminListGroupsForUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminListGroupsForUserResponse { ///
The groups that the user belongs to.
#[serde(rename = "Groups")] @@ -488,7 +487,7 @@ pub struct AdminListUserAuthEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminListUserAuthEventsResponse { ///
The response object. It includes the EventID, EventType, CreationDate, EventRisk, and EventResponse.
#[serde(rename = "AuthEvents")] @@ -526,7 +525,7 @@ pub struct AdminResetUserPasswordRequest { ///
Represents the response from the server to reset a user password as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminResetUserPasswordResponse {} ///
The request to respond to the authentication challenge, as an administrator.
@@ -561,7 +560,7 @@ pub struct AdminRespondToAuthChallengeRequest { ///
Responds to the authentication challenge, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminRespondToAuthChallengeResponse { ///
The result returned by the server in response to the authentication request.
#[serde(rename = "AuthenticationResult")] @@ -600,7 +599,7 @@ pub struct AdminSetUserMFAPreferenceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminSetUserMFAPreferenceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -617,7 +616,7 @@ pub struct AdminSetUserPasswordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminSetUserPasswordResponse {} ///
Represents the request to set user settings as an administrator.
@@ -636,7 +635,7 @@ pub struct AdminSetUserSettingsRequest { ///
Represents the response from the server to set user settings as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminSetUserSettingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -656,7 +655,7 @@ pub struct AdminUpdateAuthEventFeedbackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminUpdateAuthEventFeedbackResponse {} ///
The request to update the device status, as an administrator.
@@ -679,7 +678,7 @@ pub struct AdminUpdateDeviceStatusRequest { ///
The status response from the request to update the device, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminUpdateDeviceStatusResponse {} ///
Represents the request to update the user's attributes as an administrator.
@@ -698,7 +697,7 @@ pub struct AdminUpdateUserAttributesRequest { ///
Represents the response from the server for the request to update user attributes as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminUpdateUserAttributesResponse {} ///
The request to sign out of all devices, as an administrator.
@@ -714,7 +713,7 @@ pub struct AdminUserGlobalSignOutRequest { ///
The global sign-out response, as an administrator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AdminUserGlobalSignOutResponse {} ///
The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.
@@ -757,7 +756,7 @@ pub struct AssociateSoftwareTokenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateSoftwareTokenResponse { ///
A unique generated shared secret code that is used in the TOTP algorithm to generate a one time code.
#[serde(rename = "SecretCode")] @@ -783,7 +782,7 @@ pub struct AttributeType { ///
The authentication event type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthEventType { ///
The challenge responses.
#[serde(rename = "ChallengeResponses")] @@ -821,7 +820,7 @@ pub struct AuthEventType { ///
The authentication result.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthenticationResultType { ///
The access token.
#[serde(rename = "AccessToken")] @@ -851,7 +850,7 @@ pub struct AuthenticationResultType { ///
The challenge response type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChallengeResponseType { ///
The challenge name.
#[serde(rename = "ChallengeName")] @@ -879,12 +878,12 @@ pub struct ChangePasswordRequest { ///
The response from the server to the change password request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChangePasswordResponse {} ///
The code delivery details being returned from the server.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CodeDeliveryDetailsType { ///
The attribute name.
#[serde(rename = "AttributeName")] @@ -941,7 +940,7 @@ pub struct ConfirmDeviceRequest { ///
Confirms the device response.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmDeviceResponse { ///
Indicates whether the user confirmation is necessary to confirm the device response.
#[serde(rename = "UserConfirmationNecessary")] @@ -980,7 +979,7 @@ pub struct ConfirmForgotPasswordRequest { ///
The response from the server that results from a user's request to retrieve a forgotten password.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmForgotPasswordResponse {} ///
Represents the request to confirm registration of a user.
@@ -1015,7 +1014,7 @@ pub struct ConfirmSignUpRequest { ///
Represents the response from the server for the registration confirmation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmSignUpResponse {} ///
Contextual user data type used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.
@@ -1062,7 +1061,7 @@ pub struct CreateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGroupResponse { ///
The group object for the group.
#[serde(rename = "Group")] @@ -1095,7 +1094,7 @@ pub struct CreateIdentityProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIdentityProviderResponse { ///
The newly created identity provider object.
#[serde(rename = "IdentityProvider")] @@ -1120,7 +1119,7 @@ pub struct CreateResourceServerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResourceServerResponse { ///
The newly created resource server.
#[serde(rename = "ResourceServer")] @@ -1143,7 +1142,7 @@ pub struct CreateUserImportJobRequest { ///
Represents the response from the server to the request to create the user import job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserImportJobResponse { ///
The job object that represents the user import job.
#[serde(rename = "UserImportJob")] @@ -1216,7 +1215,7 @@ pub struct CreateUserPoolClientRequest { ///
Represents the response from the server to create a user pool client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserPoolClientResponse { ///
The user pool client that was just created.
#[serde(rename = "UserPoolClient")] @@ -1239,7 +1238,7 @@ pub struct CreateUserPoolDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserPoolDomainResponse { ///
The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.
#[serde(rename = "CloudFrontDomain")] @@ -1329,7 +1328,7 @@ pub struct CreateUserPoolRequest { ///
Represents the response from the server for the request to create a user pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserPoolResponse { ///
A container for the user pool details.
#[serde(rename = "UserPool")] @@ -1388,7 +1387,7 @@ pub struct DeleteUserAttributesRequest { ///
Represents the response from the server to delete user attributes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserAttributesResponse {} ///
Represents the request to delete a user pool client.
@@ -1413,7 +1412,7 @@ pub struct DeleteUserPoolDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserPoolDomainResponse {} ///
Represents the request to delete a user pool.
@@ -1443,7 +1442,7 @@ pub struct DescribeIdentityProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIdentityProviderResponse { ///
The identity provider that was deleted.
#[serde(rename = "IdentityProvider")] @@ -1461,7 +1460,7 @@ pub struct DescribeResourceServerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeResourceServerResponse { ///
The resource server.
#[serde(rename = "ResourceServer")] @@ -1480,7 +1479,7 @@ pub struct DescribeRiskConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRiskConfigurationResponse { ///
The risk configuration.
#[serde(rename = "RiskConfiguration")] @@ -1500,7 +1499,7 @@ pub struct DescribeUserImportJobRequest { ///
Represents the response from the server to the request to describe the user import job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserImportJobResponse { ///
The job object that represents the user import job.
#[serde(rename = "UserImportJob")] @@ -1521,7 +1520,7 @@ pub struct DescribeUserPoolClientRequest { ///
Represents the response from the server from a request to describe the user pool client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserPoolClientResponse { ///
The user pool client from a server response to describe the user pool client.
#[serde(rename = "UserPoolClient")] @@ -1537,7 +1536,7 @@ pub struct DescribeUserPoolDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserPoolDomainResponse { ///
A domain description object containing information about the domain.
#[serde(rename = "DomainDescription")] @@ -1555,7 +1554,7 @@ pub struct DescribeUserPoolRequest { ///
Represents the response to describe the user pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserPoolResponse { ///
The container of metadata returned by the server to describe the pool.
#[serde(rename = "UserPool")] @@ -1591,7 +1590,7 @@ pub struct DeviceSecretVerifierConfigType { ///
The device type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceType { ///
The device attributes.
#[serde(rename = "DeviceAttributes")] @@ -1617,7 +1616,7 @@ pub struct DeviceType { ///
A container for information about a domain.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainDescriptionType { ///
The AWS account ID for the user pool owner.
#[serde(rename = "AWSAccountId")] @@ -1672,7 +1671,7 @@ pub struct EmailConfigurationType { ///
Specifies the user context data captured at the time of an event request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventContextDataType { ///
The user's city.
#[serde(rename = "City")] @@ -1698,7 +1697,7 @@ pub struct EventContextDataType { ///
Specifies the event feedback type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventFeedbackType { ///
The event feedback date.
#[serde(rename = "FeedbackDate")] @@ -1714,7 +1713,7 @@ pub struct EventFeedbackType { ///
The event risk type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventRiskType { ///
The risk decision.
#[serde(rename = "RiskDecision")] @@ -1763,7 +1762,7 @@ pub struct ForgotPasswordRequest { ///
Represents the response from the server regarding the request to reset a password.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ForgotPasswordResponse { ///
The code delivery details returned by the server in response to the request to reset a password.
#[serde(rename = "CodeDeliveryDetails")] @@ -1781,7 +1780,7 @@ pub struct GetCSVHeaderRequest { ///
Represents the response from the server to the request to get the header information for the .csv file for the user import job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCSVHeaderResponse { ///
The header information for the .csv file for the user import job.
#[serde(rename = "CSVHeader")] @@ -1807,7 +1806,7 @@ pub struct GetDeviceRequest { ///
Gets the device response.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeviceResponse { ///
The device.
#[serde(rename = "Device")] @@ -1825,7 +1824,7 @@ pub struct GetGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGroupResponse { ///
The group object for the group.
#[serde(rename = "Group")] @@ -1844,7 +1843,7 @@ pub struct GetIdentityProviderByIdentifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIdentityProviderByIdentifierResponse { ///
The identity provider object.
#[serde(rename = "IdentityProvider")] @@ -1861,7 +1860,7 @@ pub struct GetSigningCertificateRequest { ///
Response from Cognito for a signing certificate request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSigningCertificateResponse { ///
The signing certificate.
#[serde(rename = "Certificate")] @@ -1881,7 +1880,7 @@ pub struct GetUICustomizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUICustomizationResponse { ///
The UI customization information.
#[serde(rename = "UICustomization")] @@ -1901,7 +1900,7 @@ pub struct GetUserAttributeVerificationCodeRequest { ///
The verification code response returned by the server response to get the user attribute verification code.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserAttributeVerificationCodeResponse { ///
The code delivery details returned by the server in response to the request to get the user attribute verification code.
#[serde(rename = "CodeDeliveryDetails")] @@ -1917,7 +1916,7 @@ pub struct GetUserPoolMfaConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserPoolMfaConfigResponse { ///
The multi-factor (MFA) configuration.
#[serde(rename = "MfaConfiguration")] @@ -1943,7 +1942,7 @@ pub struct GetUserRequest { ///
Represents the response from the server from the request to get information about the user.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserResponse { ///
Specifies the options for MFA (e.g., email or phone number).
#[serde(rename = "MFAOptions")] @@ -1975,12 +1974,12 @@ pub struct GlobalSignOutRequest { ///
The response to the request to sign out all devices.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GlobalSignOutResponse {} ///
The group type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupType { ///
The date the group was created.
#[serde(rename = "CreationDate")] @@ -2027,7 +2026,7 @@ pub struct HttpHeader { ///
A container for information about an identity provider.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IdentityProviderType { ///
A mapping of identity provider attributes to standard and custom user pool attributes.
#[serde(rename = "AttributeMapping")] @@ -2092,7 +2091,7 @@ pub struct InitiateAuthRequest { ///
Initiates the authentication response.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateAuthResponse { ///
The result of the authentication response. This is only returned if the caller does not need to pass another challenge. If the caller does need to pass another challenge before it gets tokens, ChallengeName, ChallengeParameters, and Session are returned.
#[serde(rename = "AuthenticationResult")] @@ -2175,7 +2174,7 @@ pub struct ListDevicesRequest { ///
Represents the response to list devices.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevicesResponse { ///
The devices returned in the list devices response.
#[serde(rename = "Devices")] @@ -2203,7 +2202,7 @@ pub struct ListGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGroupsResponse { ///
The group objects for the groups.
#[serde(rename = "Groups")] @@ -2231,7 +2230,7 @@ pub struct ListIdentityProvidersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIdentityProvidersResponse { ///
A pagination token.
#[serde(rename = "NextToken")] @@ -2258,7 +2257,7 @@ pub struct ListResourceServersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceServersResponse { ///
A pagination token.
#[serde(rename = "NextToken")] @@ -2277,7 +2276,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///
The tags that are assigned to the user pool.
#[serde(rename = "Tags")] @@ -2302,7 +2301,7 @@ pub struct ListUserImportJobsRequest { ///
Represents the response from the server to the request to list the user import jobs.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUserImportJobsResponse { ///
An identifier that can be used to return the next set of user import jobs in the list.
#[serde(rename = "PaginationToken")] @@ -2332,7 +2331,7 @@ pub struct ListUserPoolClientsRequest { ///
Represents the response from the server that lists user pool clients.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUserPoolClientsResponse { ///
An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
#[serde(rename = "NextToken")] @@ -2358,7 +2357,7 @@ pub struct ListUserPoolsRequest { ///
Represents the response to list user pools.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUserPoolsResponse { ///
An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
#[serde(rename = "NextToken")] @@ -2389,7 +2388,7 @@ pub struct ListUsersInGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersInGroupResponse { ///
An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
#[serde(rename = "NextToken")] @@ -2427,7 +2426,7 @@ pub struct ListUsersRequest { ///
The response from the request to list users.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersResponse { ///
An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
#[serde(rename = "PaginationToken")] @@ -2471,7 +2470,7 @@ pub struct MessageTemplateType { ///
The new device metadata type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NewDeviceMetadataType { ///
The device group key.
#[serde(rename = "DeviceGroupKey")] @@ -2570,7 +2569,7 @@ pub struct PasswordPolicyType { ///
A container for identity provider details.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProviderDescription { ///
The date the provider was added to the user pool.
#[serde(rename = "CreationDate")] @@ -2632,7 +2631,7 @@ pub struct ResendConfirmationCodeRequest { ///
The response from the server when the Amazon Cognito Your User Pools service makes the request to resend a confirmation code.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResendConfirmationCodeResponse { ///
The code delivery details returned by the server in response to the request to resend the confirmation code.
#[serde(rename = "CodeDeliveryDetails")] @@ -2653,7 +2652,7 @@ pub struct ResourceServerScopeType { ///
A container for information about a resource server for a user pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceServerType { ///
The identifier for the resource server.
#[serde(rename = "Identifier")] @@ -2702,7 +2701,7 @@ pub struct RespondToAuthChallengeRequest { ///
The response to respond to the authentication challenge.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RespondToAuthChallengeResponse { ///
The result returned by the server in response to the request to respond to the authentication challenge.
#[serde(rename = "AuthenticationResult")] @@ -2724,7 +2723,7 @@ pub struct RespondToAuthChallengeResponse { ///
The risk configuration type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RiskConfigurationType { ///
The account takeover risk configuration object including the NotifyConfiguration object and Actions to take in the case of an account takeover.
#[serde(rename = "AccountTakeoverRiskConfiguration")] @@ -2837,7 +2836,7 @@ pub struct SetRiskConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetRiskConfigurationResponse { ///
The risk configuration.
#[serde(rename = "RiskConfiguration")] @@ -2869,7 +2868,7 @@ pub struct SetUICustomizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetUICustomizationResponse { ///
The UI customization information.
#[serde(rename = "UICustomization")] @@ -2892,7 +2891,7 @@ pub struct SetUserMFAPreferenceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetUserMFAPreferenceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2915,7 +2914,7 @@ pub struct SetUserPoolMfaConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetUserPoolMfaConfigResponse { ///
The MFA configuration.
#[serde(rename = "MfaConfiguration")] @@ -2944,7 +2943,7 @@ pub struct SetUserSettingsRequest { ///
The response from the server for a set user settings request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetUserSettingsResponse {} ///
Represents the request to register a user.
@@ -2983,7 +2982,7 @@ pub struct SignUpRequest { ///
The response from the server for a registration request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SignUpResponse { ///
The code delivery details returned by the server response to the user registration request.
#[serde(rename = "CodeDeliveryDetails")] @@ -3057,7 +3056,7 @@ pub struct StartUserImportJobRequest { ///
Represents the response from the server to the request to start the user import job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartUserImportJobResponse { ///
The job object that represents the user import job.
#[serde(rename = "UserImportJob")] @@ -3078,7 +3077,7 @@ pub struct StopUserImportJobRequest { ///
Represents the response from the server to the request to stop the user import job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopUserImportJobResponse { ///
The job object that represents the user import job.
#[serde(rename = "UserImportJob")] @@ -3111,12 +3110,12 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///
A container for the UI customization information for a user pool's built-in app UI.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UICustomizationType { ///
The CSS values in the UI customization.
#[serde(rename = "CSS")] @@ -3160,7 +3159,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3183,7 +3182,7 @@ pub struct UpdateAuthEventFeedbackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAuthEventFeedbackResponse {} ///
Represents the request to update the device status.
@@ -3203,7 +3202,7 @@ pub struct UpdateDeviceStatusRequest { ///
The response to the request to update the device status.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeviceStatusResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3229,7 +3228,7 @@ pub struct UpdateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupResponse { ///
The group object for the group.
#[serde(rename = "Group")] @@ -3260,7 +3259,7 @@ pub struct UpdateIdentityProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIdentityProviderResponse { ///
The identity provider object.
#[serde(rename = "IdentityProvider")] @@ -3285,7 +3284,7 @@ pub struct UpdateResourceServerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResourceServerResponse { ///
The resource server.
#[serde(rename = "ResourceServer")] @@ -3305,7 +3304,7 @@ pub struct UpdateUserAttributesRequest { ///
Represents the response from the server for the request to update user attributes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserAttributesResponse { ///
The code delivery details list from the server for the request to update user attributes.
#[serde(rename = "CodeDeliveryDetailsList")] @@ -3378,7 +3377,7 @@ pub struct UpdateUserPoolClientRequest { ///
Represents the response from the server to the request to update the user pool client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserPoolClientResponse { ///
The user pool client value from the response from the server when an update user pool client request is made.
#[serde(rename = "UserPoolClient")] @@ -3402,7 +3401,7 @@ pub struct UpdateUserPoolDomainRequest { ///
The UpdateUserPoolDomain response output.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserPoolDomainResponse { ///
The Amazon CloudFront endpoint that Amazon Cognito set up when you added the custom domain to your user pool.
#[serde(rename = "CloudFrontDomain")] @@ -3480,7 +3479,7 @@ pub struct UpdateUserPoolRequest { ///
Represents the response from the server when you make a request to update the user pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserPoolResponse {} ///
Contextual data such as the user's device fingerprint, IP address, or location used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.
@@ -3494,7 +3493,7 @@ pub struct UserContextDataType { ///
The user import job type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserImportJobType { ///
The role ARN for the Amazon CloudWatch Logging role for the user import job. For more information, see "Creating the CloudWatch Logs IAM Role" in the Amazon Cognito Developer Guide.
#[serde(rename = "CloudWatchLogsRoleArn")] @@ -3560,7 +3559,7 @@ pub struct UserPoolAddOnsType { ///
The description of the user pool client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserPoolClientDescription { ///
The ID of the client associated with the user pool.
#[serde(rename = "ClientId")] @@ -3578,7 +3577,7 @@ pub struct UserPoolClientDescription { ///
Contains information about a user pool client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserPoolClientType { ///
Set to code to initiate a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the token endpoint.
Set to token to specify that the client should get the access token (and, optionally, ID token, based on scopes) directly.
#[serde(rename = "AllowedOAuthFlows")] @@ -3656,7 +3655,7 @@ pub struct UserPoolClientType { ///
A user pool description.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserPoolDescriptionType { ///
The date the user pool description was created.
#[serde(rename = "CreationDate")] @@ -3695,7 +3694,7 @@ pub struct UserPoolPolicyType { ///
A container for information about the user pool.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserPoolType { ///
The configuration for AdminCreateUser requests.
#[serde(rename = "AdminCreateUserConfig")] @@ -3817,7 +3816,7 @@ pub struct UserPoolType { ///
The user type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserType { ///
A container with information about the user type attributes.
#[serde(rename = "Attributes")] @@ -3898,7 +3897,7 @@ pub struct VerifySoftwareTokenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VerifySoftwareTokenResponse { ///
The session which should be passed both ways in challenge-response calls to the service.
#[serde(rename = "Session")] @@ -3926,7 +3925,7 @@ pub struct VerifyUserAttributeRequest { ///
A container representing the response from the server from the request to verify user attributes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VerifyUserAttributeResponse {} /// Errors returned by AddCustomAttributes @@ -12570,10 +12569,7 @@ impl CognitoIdentityProviderClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CognitoIdentityProviderClient { - CognitoIdentityProviderClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -12587,10 +12583,17 @@ impl CognitoIdentityProviderClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CognitoIdentityProviderClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client( + client: Client, + region: region::Region, + ) -> CognitoIdentityProviderClient { + CognitoIdentityProviderClient { client, region } } } diff --git a/rusoto/services/cognito-sync/Cargo.toml b/rusoto/services/cognito-sync/Cargo.toml index 67b0c356d73..d353b4b1e48 100644 --- a/rusoto/services/cognito-sync/Cargo.toml +++ b/rusoto/services/cognito-sync/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cognito_sync" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cognito-sync/README.md b/rusoto/services/cognito-sync/README.md index 2102b2e849f..b25ec3688c1 100644 --- a/rusoto/services/cognito-sync/README.md +++ b/rusoto/services/cognito-sync/README.md @@ -23,9 +23,16 @@ To use `rusoto_cognito_sync` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_cognito_sync = "0.40.0" +rusoto_cognito_sync = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cognito-sync/src/custom/mod.rs b/rusoto/services/cognito-sync/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cognito-sync/src/custom/mod.rs +++ b/rusoto/services/cognito-sync/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cognito-sync/src/generated.rs b/rusoto/services/cognito-sync/src/generated.rs index 840186bbb8d..6d0041ded0e 100644 --- a/rusoto/services/cognito-sync/src/generated.rs +++ b/rusoto/services/cognito-sync/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
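The attribute rewrite repeated throughout these generated files follows one pattern: `Deserialize` stays unconditional, while `Serialize` is gated behind `cfg_attr(any(test, feature = "serialize_structs"), ...)`. A standalone sketch of the mechanism; the struct and field here are illustrative, not from the diff, and assume serde with its `derive` feature (the generated code itself pulls the macros in via `serde_derive`):

```rust
use serde::{Deserialize, Serialize};

// Serialize is derived only when compiling tests or when the crate's
// `serialize_structs` feature is enabled; plain consumers get Deserialize.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ExampleResponse {
    pub identity_id: Option<String>,
}
```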
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -35,7 +34,7 @@ pub struct BulkPublishRequest { ///

The output for the BulkPublish operation.
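Aside on the constructor refactor in the client impls above: `new` and `new_with` are now thin wrappers over the added `new_with_client`, which lets callers inject a prebuilt `rusoto_core::Client`. A minimal sketch of calling it directly, assuming the 0.41.0 crates; the region choice is arbitrary:

```rust
use rusoto_cognito_sync::CognitoSyncClient;
use rusoto_core::{Client, Region};

fn main() {
    // One rusoto_core::Client owns the dispatcher and default credentials;
    // new_with_client wraps it for a specific service.
    let core_client = Client::shared();
    let sync_client = CognitoSyncClient::new_with_client(core_client, Region::UsEast1);
    let _ = sync_client; // ready to dispatch signed CognitoSync requests
}
```

Routing all three constructors through one place is what makes this injection point possible.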

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BulkPublishResponse { ///

A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.
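The recurring `cfg_attr` change in these hunks is what the new `serialize_structs` feature gates: output structs such as `BulkPublishResponse` derive `Serialize` outside of tests once the feature is enabled. A hedged sketch, assuming `features = ["serialize_structs"]` on `rusoto_cognito_sync` and `serde_json` in the caller's own dependencies:

```rust
use rusoto_cognito_sync::BulkPublishResponse;

fn main() {
    // Without serialize_structs this struct only derives Deserialize,
    // so serde_json::to_string would not compile.
    let response = BulkPublishResponse {
        identity_pool_id: Some("us-east-1:EXAMPLE".to_string()),
    };
    println!("{}", serde_json::to_string(&response).expect("serializable"));
}
```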

#[serde(rename = "IdentityPoolId")] @@ -62,7 +61,7 @@ pub struct CognitoStreams { ///

A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Dataset { ///

Date on which the dataset was created.

#[serde(rename = "CreationDate")] @@ -110,7 +109,7 @@ pub struct DeleteDatasetRequest { ///

Response to a successful DeleteDataset request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDatasetResponse { ///

A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.

#[serde(rename = "Dataset")] @@ -134,7 +133,7 @@ pub struct DescribeDatasetRequest { ///

Response to a successful DescribeDataset request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDatasetResponse { ///

Metadata for a collection of data for an identity. An identity can have multiple datasets. A dataset can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.

#[serde(rename = "Dataset")] @@ -152,7 +151,7 @@ pub struct DescribeIdentityPoolUsageRequest { ///

Response to a successful DescribeIdentityPoolUsage request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIdentityPoolUsageResponse { ///

Information about the usage of the identity pool.

#[serde(rename = "IdentityPoolUsage")] @@ -173,7 +172,7 @@ pub struct DescribeIdentityUsageRequest { ///

The response to a successful DescribeIdentityUsage request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIdentityUsageResponse { ///

Usage information for the identity.

#[serde(rename = "IdentityUsage")] @@ -191,7 +190,7 @@ pub struct GetBulkPublishDetailsRequest { ///

The output for the GetBulkPublishDetails operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBulkPublishDetailsResponse { ///

If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation completed.

#[serde(rename = "BulkPublishCompleteTime")] @@ -225,7 +224,7 @@ pub struct GetCognitoEventsRequest { ///

The response from the GetCognitoEvents request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCognitoEventsResponse { ///

The Cognito Events returned from the GetCognitoEvents request.

#[serde(rename = "Events")] @@ -243,7 +242,7 @@ pub struct GetIdentityPoolConfigurationRequest { ///

The output for the GetIdentityPoolConfiguration operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIdentityPoolConfigurationResponse { ///

Options to apply to this identity pool for Amazon Cognito streams.

#[serde(rename = "CognitoStreams")] @@ -261,7 +260,7 @@ pub struct GetIdentityPoolConfigurationResponse { ///

Usage information for the identity pool.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IdentityPoolUsage { ///

Data storage information for the identity pool.

#[serde(rename = "DataStorage")] @@ -283,7 +282,7 @@ pub struct IdentityPoolUsage { ///

Usage information for the identity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IdentityUsage { ///

Total data storage for this identity.

#[serde(rename = "DataStorage")] @@ -328,7 +327,7 @@ pub struct ListDatasetsRequest { ///

Returned for a successful ListDatasets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDatasetsResponse { ///

Number of datasets returned.

#[serde(rename = "Count")] @@ -359,7 +358,7 @@ pub struct ListIdentityPoolUsageRequest { ///

Returned for a successful ListIdentityPoolUsage request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIdentityPoolUsageResponse { ///

Total number of identities for the identity pool.

#[serde(rename = "Count")] @@ -411,7 +410,7 @@ pub struct ListRecordsRequest { ///

Returned for a successful ListRecordsRequest.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRecordsResponse { ///

Total number of records.
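For orientation, a sketch of exercising the `ListRecords` types above through the `CognitoSync` trait; the pool, identity, and dataset names are placeholders, and `.sync()` is the blocking adapter on the returned `RusotoFuture`:

```rust
use rusoto_cognito_sync::{CognitoSync, CognitoSyncClient, ListRecordsRequest};
use rusoto_core::Region;

fn main() {
    let client = CognitoSyncClient::new(Region::UsEast1);
    let request = ListRecordsRequest {
        dataset_name: "saved-game".to_string(),
        identity_id: "us-east-1:identity-id".to_string(),
        identity_pool_id: "us-east-1:pool-id".to_string(),
        // Remaining paging/sync fields are optional and default to None.
        ..Default::default()
    };
    match client.list_records(request).sync() {
        Ok(output) => println!("records returned: {:?}", output.count),
        Err(error) => eprintln!("ListRecords failed: {}", error),
    }
}
```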

#[serde(rename = "Count")] @@ -466,7 +465,7 @@ pub struct PushSync { ///

The basic data structure of a dataset.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Record { ///

The last modified date of the client device.

#[serde(rename = "DeviceLastModifiedDate")] @@ -535,7 +534,7 @@ pub struct RegisterDeviceRequest { ///

Response to a RegisterDevice request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterDeviceResponse { ///

The unique ID generated for this device by Cognito.

#[serde(rename = "DeviceId")] @@ -572,7 +571,7 @@ pub struct SetIdentityPoolConfigurationRequest { ///

The output for the SetIdentityPoolConfiguration operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetIdentityPoolConfigurationResponse { ///

Options to apply to this identity pool for Amazon Cognito streams.

#[serde(rename = "CognitoStreams")] @@ -607,7 +606,7 @@ pub struct SubscribeToDatasetRequest { ///

Response to a SubscribeToDataset request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribeToDatasetResponse {} ///

A request to UnsubscribeFromDataset.

@@ -629,7 +628,7 @@ pub struct UnsubscribeFromDatasetRequest { ///

Response to an UnsubscribeFromDataset request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnsubscribeFromDatasetResponse {} ///

A request to post updates to records or add and delete records for a dataset and user.

@@ -663,7 +662,7 @@ pub struct UpdateRecordsRequest { ///

Returned for a successful UpdateRecordsRequest.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRecordsResponse { ///

A list of records that have been updated.

#[serde(rename = "Records")] @@ -1858,10 +1857,7 @@ impl CognitoSyncClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CognitoSyncClient { - CognitoSyncClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1875,10 +1871,14 @@ impl CognitoSyncClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CognitoSyncClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CognitoSyncClient { + CognitoSyncClient { client, region } } } diff --git a/rusoto/services/comprehend/Cargo.toml b/rusoto/services/comprehend/Cargo.toml index d2e29c00c9d..48ac414b485 100644 --- a/rusoto/services/comprehend/Cargo.toml +++ b/rusoto/services/comprehend/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_comprehend" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/comprehend/README.md b/rusoto/services/comprehend/README.md index f737c1c90d0..5d1377b87eb 100644 --- a/rusoto/services/comprehend/README.md +++ b/rusoto/services/comprehend/README.md @@ -23,9 +23,16 @@ To use `rusoto_comprehend` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_comprehend = "0.40.0" +rusoto_comprehend = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/comprehend/src/custom/mod.rs b/rusoto/services/comprehend/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/comprehend/src/custom/mod.rs +++ b/rusoto/services/comprehend/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/comprehend/src/generated.rs b/rusoto/services/comprehend/src/generated.rs index f1e153f1d1a..ba2ca446408 100644 --- a/rusoto/services/comprehend/src/generated.rs +++ b/rusoto/services/comprehend/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectDominantLanguageItemResult { ///

The zero-based index of the document in the input list.

#[serde(rename = "Index")] @@ -46,7 +45,7 @@ pub struct BatchDetectDominantLanguageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectDominantLanguageResponse { ///

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

#[serde(rename = "ErrorList")] @@ -58,7 +57,7 @@ pub struct BatchDetectDominantLanguageResponse { ///

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectEntitiesItemResult { ///

One or more Entity objects, one for each entity detected in the document.

#[serde(rename = "Entities")] @@ -72,7 +71,7 @@ pub struct BatchDetectEntitiesItemResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDetectEntitiesRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
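Since these hunks widen the accepted language codes, a sketch of a batch call with one of the newly documented languages; the field names follow the generated request and response structs in this file, and `.sync()` blocks on the returned `RusotoFuture`:

```rust
use rusoto_comprehend::{BatchDetectEntitiesRequest, Comprehend, ComprehendClient};
use rusoto_core::Region;

fn main() {
    let client = ComprehendClient::new(Region::UsEast1);
    let request = BatchDetectEntitiesRequest {
        // Any primary language is now accepted, not just "en"/"es".
        language_code: "de".to_string(),
        text_list: vec!["Berlin ist die Hauptstadt von Deutschland.".to_string()],
    };
    match client.batch_detect_entities(request).sync() {
        Ok(output) => println!("per-document results: {:?}", output.result_list),
        Err(error) => eprintln!("BatchDetectEntities failed: {}", error),
    }
}
```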

@@ -81,7 +80,7 @@ pub struct BatchDetectEntitiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectEntitiesResponse { ///

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

#[serde(rename = "ErrorList")] @@ -93,7 +92,7 @@ pub struct BatchDetectEntitiesResponse { ///

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectKeyPhrasesItemResult { ///

The zero-based index of the document in the input list.

#[serde(rename = "Index")] @@ -107,7 +106,7 @@ pub struct BatchDetectKeyPhrasesItemResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDetectKeyPhrasesRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -116,7 +115,7 @@ pub struct BatchDetectKeyPhrasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectKeyPhrasesResponse { ///

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

#[serde(rename = "ErrorList")] @@ -128,7 +127,7 @@ pub struct BatchDetectKeyPhrasesResponse { ///

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectSentimentItemResult { ///

The zero-based index of the document in the input list.

#[serde(rename = "Index")] @@ -146,7 +145,7 @@ pub struct BatchDetectSentimentItemResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDetectSentimentRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -155,7 +154,7 @@ pub struct BatchDetectSentimentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectSentimentResponse { ///

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

#[serde(rename = "ErrorList")] @@ -167,7 +166,7 @@ pub struct BatchDetectSentimentResponse { ///

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectSyntaxItemResult { ///

The zero-based index of the document in the input list.

#[serde(rename = "Index")] @@ -181,7 +180,7 @@ pub struct BatchDetectSyntaxItemResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDetectSyntaxRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -190,7 +189,7 @@ pub struct BatchDetectSyntaxRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDetectSyntaxResponse { ///

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

#[serde(rename = "ErrorList")] @@ -202,7 +201,7 @@ pub struct BatchDetectSyntaxResponse { ///

Describes an error that occurred while processing a document in a batch. The operation returns one BatchItemError object for each document that contained an error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchItemError { ///

The numeric error code of the error.

#[serde(rename = "ErrorCode")] @@ -220,7 +219,7 @@ pub struct BatchItemError { ///

Describes the result metrics for the test data associated with a document classifier.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClassifierEvaluationMetrics { ///

The fraction of the labels that were correctly recognized. It is computed by dividing the number of labels in the test documents that were correctly recognized by the total number of labels in the test documents.

#[serde(rename = "Accuracy")] @@ -242,7 +241,7 @@ pub struct ClassifierEvaluationMetrics { ///

Provides information about a document classifier.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClassifierMetadata { ///

Describes the result metrics for the test data associated with a document classifier.

#[serde(rename = "EvaluationMetrics")] @@ -277,7 +276,7 @@ pub struct CreateDocumentClassifierRequest { ///

Specifies the format and location of the input data for the job.

#[serde(rename = "InputDataConfig")] pub input_data_config: DocumentClassifierInputDataConfig, - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

Enables the addition of output results configuration parameters for custom classifier jobs.

@@ -299,7 +298,7 @@ pub struct CreateDocumentClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDocumentClassifierResponse { ///

The Amazon Resource Name (ARN) that identifies the document classifier.

#[serde(rename = "DocumentClassifierArn")] @@ -340,7 +339,7 @@ pub struct CreateEntityRecognizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEntityRecognizerResponse { ///

The Amazon Resource Name (ARN) that identifies the entity recognizer.

#[serde(rename = "EntityRecognizerArn")] @@ -356,7 +355,7 @@ pub struct DeleteDocumentClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDocumentClassifierResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -367,7 +366,7 @@ pub struct DeleteEntityRecognizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEntityRecognizerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -378,7 +377,7 @@ pub struct DescribeDocumentClassificationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDocumentClassificationJobResponse { ///

An object that describes the properties associated with the document classification job.

#[serde(rename = "DocumentClassificationJobProperties")] @@ -394,7 +393,7 @@ pub struct DescribeDocumentClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDocumentClassifierResponse { ///

An object that contains the properties associated with a document classifier.

#[serde(rename = "DocumentClassifierProperties")] @@ -410,7 +409,7 @@ pub struct DescribeDominantLanguageDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDominantLanguageDetectionJobResponse { ///

An object that contains the properties associated with a dominant language detection job.

#[serde(rename = "DominantLanguageDetectionJobProperties")] @@ -426,7 +425,7 @@ pub struct DescribeEntitiesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEntitiesDetectionJobResponse { ///

An object that contains the properties associated with an entities detection job.

#[serde(rename = "EntitiesDetectionJobProperties")] @@ -442,7 +441,7 @@ pub struct DescribeEntityRecognizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEntityRecognizerResponse { ///

Describes information associated with an entity recognizer.

#[serde(rename = "EntityRecognizerProperties")] @@ -458,7 +457,7 @@ pub struct DescribeKeyPhrasesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeKeyPhrasesDetectionJobResponse { ///

An object that contains the properties associated with a key phrases detection job.

#[serde(rename = "KeyPhrasesDetectionJobProperties")] @@ -474,7 +473,7 @@ pub struct DescribeSentimentDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSentimentDetectionJobResponse { ///

An object that contains the properties associated with a sentiment detection job.

#[serde(rename = "SentimentDetectionJobProperties")] @@ -490,7 +489,7 @@ pub struct DescribeTopicsDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTopicsDetectionJobResponse { ///

The list of properties for the requested job.

#[serde(rename = "TopicsDetectionJobProperties")] @@ -506,7 +505,7 @@ pub struct DetectDominantLanguageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectDominantLanguageResponse { ///

The languages that Amazon Comprehend detected in the input text. For each language, the response returns the RFC 5646 language code and the level of confidence that Amazon Comprehend has in the accuracy of its inference. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

#[serde(rename = "Languages")] @@ -516,7 +515,7 @@ pub struct DetectDominantLanguageResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DetectEntitiesRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -525,7 +524,7 @@ pub struct DetectEntitiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectEntitiesResponse { ///

A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection. For a list of entity types, see how-entities.

#[serde(rename = "Entities")] @@ -535,7 +534,7 @@ pub struct DetectEntitiesResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DetectKeyPhrasesRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -544,7 +543,7 @@ pub struct DetectKeyPhrasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectKeyPhrasesResponse { ///

A collection of key phrases that Amazon Comprehend identified in the input text. For each key phrase, the response provides the text of the key phrase, where the key phrase begins and ends, and the level of confidence that Amazon Comprehend has in the accuracy of the detection.

#[serde(rename = "KeyPhrases")] @@ -554,7 +553,7 @@ pub struct DetectKeyPhrasesResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DetectSentimentRequest { - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -563,7 +562,7 @@ pub struct DetectSentimentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectSentimentResponse { ///

The inferred sentiment that Amazon Comprehend has the highest level of confidence in.
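A matching sketch for the single-document sentiment call whose response struct is defined here; the text and region are placeholders:

```rust
use rusoto_comprehend::{Comprehend, ComprehendClient, DetectSentimentRequest};
use rusoto_core::Region;

fn main() {
    let client = ComprehendClient::new(Region::UsEast1);
    let request = DetectSentimentRequest {
        language_code: "fr".to_string(),
        text: "Ce film était excellent.".to_string(),
    };
    match client.detect_sentiment(request).sync() {
        // `sentiment` is the winning label; `sentiment_score` holds the
        // per-label confidence values described by SentimentScore below.
        Ok(output) => println!("{:?} {:?}", output.sentiment, output.sentiment_score),
        Err(error) => eprintln!("DetectSentiment failed: {}", error),
    }
}
```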

#[serde(rename = "Sentiment")] @@ -577,7 +576,7 @@ pub struct DetectSentimentResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DetectSyntaxRequest { - ///

The language code of the input documents. You can specify English ("en") or Spanish ("es").

+ ///

The language code of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt").

#[serde(rename = "LanguageCode")] pub language_code: String, ///

A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

@@ -586,7 +585,7 @@ pub struct DetectSyntaxRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectSyntaxResponse { ///

A collection of syntax tokens describing the text. For each token, the response provides the text, the token type, where the text begins and ends, and the level of confidence that Amazon Comprehend has that the token is correct. For a list of token types, see how-syntax.

#[serde(rename = "SyntaxTokens")] @@ -617,7 +616,7 @@ pub struct DocumentClassificationJobFilter { ///

Provides information about a document classification job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentClassificationJobProperties { ///

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -709,7 +708,7 @@ pub struct DocumentClassifierOutputDataConfig { ///

Provides information about a document classifier.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentClassifierProperties { ///

Information about the document classifier, including the number of documents used for training the classifier, the number of documents used to test the classifier, and an accuracy rating.

#[serde(rename = "ClassifierMetadata")] @@ -771,7 +770,7 @@ pub struct DocumentClassifierProperties { ///

Returns the code for the dominant language in the input text and the level of confidence that Amazon Comprehend has in the accuracy of the detection.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DominantLanguage { ///

The RFC 5646 language code for the dominant language. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

#[serde(rename = "LanguageCode")] @@ -806,7 +805,7 @@ pub struct DominantLanguageDetectionJobFilter { ///

Provides information about a dominant language detection job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DominantLanguageDetectionJobProperties { ///

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -877,7 +876,7 @@ pub struct EntitiesDetectionJobFilter { ///

Provides information about an entities detection job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntitiesDetectionJobProperties { ///

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -935,7 +934,7 @@ pub struct EntitiesDetectionJobProperties { ///

Provides information about an entity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Entity { ///

A character offset in the input text that shows where the entity begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.

#[serde(rename = "BeginOffset")] @@ -983,9 +982,9 @@ pub struct EntityRecognizerEntityList { pub s3_uri: String, } -///

Detailed information about the accuracy of an entity recognizer.

+///

Detailed information about the accuracy of an entity recognizer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntityRecognizerEvaluationMetrics { ///

A measure of how accurate the recognizer results are for the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

#[serde(rename = "F1Score")] @@ -1032,20 +1031,20 @@ pub struct EntityRecognizerInputDataConfig { #[serde(rename = "EntityList")] #[serde(skip_serializing_if = "Option::is_none")] pub entity_list: Option, - ///

The entity types in the input data for an entity recognizer.

+ ///

The entity types in the input data for an entity recognizer. A maximum of 12 entity types can be used at one time to train an entity recognizer.

#[serde(rename = "EntityTypes")] pub entity_types: Vec, } ///

Detailed information about an entity recognizer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntityRecognizerMetadata { ///

Entity types from the metadata of an entity recognizer.

#[serde(rename = "EntityTypes")] #[serde(skip_serializing_if = "Option::is_none")] pub entity_types: Option>, - ///

Detailed information about the accuracy of an entity recognizer.

+ ///

Detailed information about the accuracy of an entity recognizer.

#[serde(rename = "EvaluationMetrics")] #[serde(skip_serializing_if = "Option::is_none")] pub evaluation_metrics: Option, @@ -1061,8 +1060,16 @@ pub struct EntityRecognizerMetadata { ///

Individual item from the list of entity types in the metadata of an entity recognizer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntityRecognizerMetadataEntityTypesListItem { + ///

Detailed information about the accuracy of the entity recognizer for a specific item on the list of entity types.

+ #[serde(rename = "EvaluationMetrics")] + #[serde(skip_serializing_if = "Option::is_none")] + pub evaluation_metrics: Option, + ///

Indicates the number of times the given entity name was seen in the training data.

+ #[serde(rename = "NumberOfTrainMentions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_train_mentions: Option, ///

Type of entity from the list of entity types in the metadata of an entity recognizer.

#[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1071,7 +1078,7 @@ pub struct EntityRecognizerMetadataEntityTypesListItem { ///

Describes information about an entity recognizer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntityRecognizerProperties { ///

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -1127,6 +1134,24 @@ pub struct EntityRecognizerProperties { pub vpc_config: Option, } +///

Detailed information about the accuracy of an entity recognizer for a specific entity type.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EntityTypesEvaluationMetrics { + ///

A measure of how accurate the recognizer results are for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

+ #[serde(rename = "F1Score")] + #[serde(skip_serializing_if = "Option::is_none")] + pub f1_score: Option, + ///

A measure of the usefulness of the recognizer results for a specific entity type in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.

+ #[serde(rename = "Precision")] + #[serde(skip_serializing_if = "Option::is_none")] + pub precision: Option, + ///

A measure of how complete the recognizer results are for a specific entity type in the test data. High recall means that the recognizer returned most of the relevant results.

+ #[serde(rename = "Recall")] + #[serde(skip_serializing_if = "Option::is_none")] + pub recall: Option, +} + ///

Information about an individual item on a list of entity types.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EntityTypesListItem { @@ -1149,7 +1174,7 @@ pub struct InputDataConfig { ///

Describes a key noun phrase.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyPhrase { ///

A character offset in the input text that shows where the key phrase begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A code point is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.

#[serde(rename = "BeginOffset")] @@ -1192,7 +1217,7 @@ pub struct KeyPhrasesDetectionJobFilter { ///

Provides information about a key phrases detection job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyPhrasesDetectionJobProperties { ///

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -1261,7 +1286,7 @@ pub struct ListDocumentClassificationJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDocumentClassificationJobsResponse { ///

A list containing the properties of each job returned.

#[serde(rename = "DocumentClassificationJobPropertiesList")] @@ -1291,7 +1316,7 @@ pub struct ListDocumentClassifiersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDocumentClassifiersResponse { ///

A list containing the properties of each job returned.

#[serde(rename = "DocumentClassifierPropertiesList")] @@ -1320,7 +1345,7 @@ pub struct ListDominantLanguageDetectionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDominantLanguageDetectionJobsResponse { ///

A list containing the properties of each job that is returned.

#[serde(rename = "DominantLanguageDetectionJobPropertiesList")] @@ -1350,7 +1375,7 @@ pub struct ListEntitiesDetectionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEntitiesDetectionJobsResponse { ///

A list containing the properties of each job that is returned.

#[serde(rename = "EntitiesDetectionJobPropertiesList")] @@ -1379,7 +1404,7 @@ pub struct ListEntityRecognizersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEntityRecognizersResponse { ///

The list of properties of an entity recognizer.

#[serde(rename = "EntityRecognizerPropertiesList")] @@ -1408,7 +1433,7 @@ pub struct ListKeyPhrasesDetectionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListKeyPhrasesDetectionJobsResponse { ///

A list containing the properties of each job that is returned.

#[serde(rename = "KeyPhrasesDetectionJobPropertiesList")] @@ -1437,7 +1462,7 @@ pub struct ListSentimentDetectionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSentimentDetectionJobsResponse { ///

Identifies the next page of results to return.

#[serde(rename = "NextToken")] @@ -1457,7 +1482,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The Amazon Resource Name (ARN) of the given Amazon Comprehend resource you are querying.

#[serde(rename = "ResourceArn")] @@ -1486,7 +1511,7 @@ pub struct ListTopicsDetectionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTopicsDetectionJobsResponse { ///

Identifies the next page of results to return.
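The List* responses in this file all page the same way: feed `next_token` back into the request until the service stops returning one. A hedged sketch against `ListTopicsDetectionJobs`; field names follow the generated structs, and `.sync()` blocks per call:

```rust
use rusoto_comprehend::{Comprehend, ComprehendClient, ListTopicsDetectionJobsRequest};
use rusoto_core::Region;

fn main() {
    let client = ComprehendClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListTopicsDetectionJobsRequest {
            next_token: next_token.clone(),
            ..Default::default() // no filter, default page size
        };
        let output = client
            .list_topics_detection_jobs(request)
            .sync()
            .expect("ListTopicsDetectionJobs failed");
        for job in output.topics_detection_job_properties_list.unwrap_or_default() {
            println!("{:?}", job.job_id);
        }
        // An absent NextToken marks the final page.
        next_token = output.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```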

#[serde(rename = "NextToken")] @@ -1512,7 +1537,7 @@ pub struct OutputDataConfig { ///

Identifies the part of speech represented by the token and gives the confidence that Amazon Comprehend has that the part of speech was correctly identified. For more information about the parts of speech that Amazon Comprehend can identify, see how-syntax.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PartOfSpeechTag { ///

The confidence that Amazon Comprehend has that the part of speech was correctly identified.

#[serde(rename = "Score")] @@ -1547,7 +1572,7 @@ pub struct SentimentDetectionJobFilter { ///

Provides information about a sentiment detection job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SentimentDetectionJobProperties { ///

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

#[serde(rename = "DataAccessRoleArn")] @@ -1601,7 +1626,7 @@ pub struct SentimentDetectionJobProperties { ///

Describes the level of confidence that Amazon Comprehend has in the accuracy of its detection of sentiments.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SentimentScore { ///

The level of confidence that Amazon Comprehend has in the accuracy of its detection of the MIXED sentiment.

#[serde(rename = "Mixed")] @@ -1654,7 +1679,7 @@ pub struct StartDocumentClassificationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartDocumentClassificationJobResponse { ///

The identifier generated for the job. To get the status of the job, use this identifier with the operation.

#[serde(rename = "JobId")] @@ -1696,7 +1721,7 @@ pub struct StartDominantLanguageDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartDominantLanguageDetectionJobResponse { ///

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

#[serde(rename = "JobId")] @@ -1745,7 +1770,7 @@ pub struct StartEntitiesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartEntitiesDetectionJobResponse { ///

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

#[serde(rename = "JobId")] @@ -1773,7 +1798,7 @@ pub struct StartKeyPhrasesDetectionJobRequest { #[serde(rename = "JobName")] #[serde(skip_serializing_if = "Option::is_none")] pub job_name: Option, - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

Specifies where to send the output files.

@@ -1790,7 +1815,7 @@ pub struct StartKeyPhrasesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartKeyPhrasesDetectionJobResponse { ///

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

#[serde(rename = "JobId")] @@ -1818,7 +1843,7 @@ pub struct StartSentimentDetectionJobRequest { #[serde(rename = "JobName")] #[serde(skip_serializing_if = "Option::is_none")] pub job_name: Option, - ///

The language of the input documents. You can specify English ("en") or Spanish ("es"). All documents must be in the same language.

+ ///

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.

#[serde(rename = "LanguageCode")] pub language_code: String, ///

Specifies where to send the output files.

@@ -1835,7 +1860,7 @@ pub struct StartSentimentDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSentimentDetectionJobResponse { ///

The identifier generated for the job. To get the status of a job, use this identifier with the operation.
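Tying the Start*/Describe* pairs together: the `JobId` handed back by a start call is the key for the corresponding describe call. A sketch using the sentiment job types above; the role ARN and S3 URIs are placeholders, and all optional fields are left at their defaults:

```rust
use rusoto_comprehend::{
    Comprehend, ComprehendClient, DescribeSentimentDetectionJobRequest, InputDataConfig,
    OutputDataConfig, StartSentimentDetectionJobRequest,
};
use rusoto_core::Region;

fn main() {
    let client = ComprehendClient::new(Region::UsEast1);
    let start = StartSentimentDetectionJobRequest {
        data_access_role_arn: "arn:aws:iam::123456789012:role/comprehend-access".to_string(),
        language_code: "en".to_string(),
        input_data_config: InputDataConfig {
            s3_uri: "s3://example-bucket/input/".to_string(),
            ..Default::default()
        },
        output_data_config: OutputDataConfig {
            s3_uri: "s3://example-bucket/output/".to_string(),
            ..Default::default()
        },
        ..Default::default()
    };
    let started = client
        .start_sentiment_detection_job(start)
        .sync()
        .expect("start failed");
    // Poll job status with the identifier from the start response.
    let describe = DescribeSentimentDetectionJobRequest {
        job_id: started.job_id.expect("service returned no JobId"),
    };
    let status = client
        .describe_sentiment_detection_job(describe)
        .sync()
        .expect("describe failed");
    println!("{:?}", status.sentiment_detection_job_properties);
}
```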

#[serde(rename = "JobId")] @@ -1881,7 +1906,7 @@ pub struct StartTopicsDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartTopicsDetectionJobResponse { ///

The identifier generated for the job. To get the status of the job, use this identifier with the DescribeTopicDetectionJob operation.

#[serde(rename = "JobId")] @@ -1901,7 +1926,7 @@ pub struct StopDominantLanguageDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopDominantLanguageDetectionJobResponse { ///

The identifier of the dominant language detection job to stop.

#[serde(rename = "JobId")] @@ -1921,7 +1946,7 @@ pub struct StopEntitiesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopEntitiesDetectionJobResponse { ///

The identifier of the entities detection job to stop.

#[serde(rename = "JobId")] @@ -1941,7 +1966,7 @@ pub struct StopKeyPhrasesDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopKeyPhrasesDetectionJobResponse { ///

The identifier of the key phrases detection job to stop.

#[serde(rename = "JobId")] @@ -1961,7 +1986,7 @@ pub struct StopSentimentDetectionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopSentimentDetectionJobResponse { ///

The identifier of the sentiment detection job to stop.

#[serde(rename = "JobId")] @@ -1981,7 +2006,7 @@ pub struct StopTrainingDocumentClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopTrainingDocumentClassifierResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1992,12 +2017,12 @@ pub struct StopTrainingEntityRecognizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopTrainingEntityRecognizerResponse {} ///

Represents a word in the input text that was recognized and assigned a part of speech. There is one syntax token record for each word in the source text.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SyntaxToken { ///

The zero-based offset from the beginning of the source text to the first character in the word.

#[serde(rename = "BeginOffset")] @@ -2044,7 +2069,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Provides information for filtering topic detection jobs. For more information, see .

@@ -2070,7 +2095,7 @@ pub struct TopicsDetectionJobFilter { ///

Provides information about a topic detection job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TopicsDetectionJobProperties { ///

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your job data.

#[serde(rename = "DataAccessRoleArn")] @@ -2133,7 +2158,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} ///

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For more information, see Amazon VPC.

@@ -2219,7 +2244,7 @@ pub enum BatchDetectEntitiesError { InvalidRequest(String), ///

The size of the input text exceeds the limit. Use a smaller document.

TextSizeLimitExceeded(String), - ///

Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.

+ ///

Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.

UnsupportedLanguage(String), } @@ -2282,7 +2307,7 @@ pub enum BatchDetectKeyPhrasesError { InvalidRequest(String), ///
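The doc change above only rewords the `UnsupportedLanguage` variant; callers match on it exactly as before. A sketch of handling these variants, assuming the `Comprehend` trait and `batch_detect_entities` method follow rusoto's usual generated naming:

```rust
use rusoto_comprehend::{
    BatchDetectEntitiesError, BatchDetectEntitiesRequest, Comprehend, ComprehendClient,
};
use rusoto_core::{Region, RusotoError};

fn main() {
    let client = ComprehendClient::new(Region::UsEast1);
    let request = BatchDetectEntitiesRequest {
        language_code: "en".to_string(),
        text_list: vec!["Rusoto wraps the AWS Comprehend API.".to_string()],
    };
    match client.batch_detect_entities(request).sync() {
        Ok(response) => println!("{:?}", response.result_list),
        // Language problems arrive as a distinct variant, so they can be
        // handled separately from, say, oversized documents.
        Err(RusotoError::Service(BatchDetectEntitiesError::UnsupportedLanguage(msg))) => {
            eprintln!("unsupported language: {}", msg);
        }
        Err(RusotoError::Service(BatchDetectEntitiesError::TextSizeLimitExceeded(msg))) => {
            eprintln!("document too large: {}", msg);
        }
        Err(err) => eprintln!("request failed: {}", err),
    }
}
```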

@@ -2282,7 +2307,7 @@ pub enum BatchDetectKeyPhrasesError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -2349,7 +2374,7 @@ pub enum BatchDetectSentimentError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -2412,7 +2437,7 @@ pub enum BatchDetectSyntaxError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -2481,7 +2506,7 @@ pub enum CreateDocumentClassifierError {
    TooManyRequests(String),
    /// The request contains more tags than can be associated with a resource (50 tags per resource). The maximum number of tags includes both existing tags and those included in your current request.
    TooManyTags(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -2572,7 +2597,7 @@ pub enum CreateEntityRecognizerError {
    TooManyRequests(String),
    /// The request contains more tags than can be associated with a resource (50 tags per resource). The maximum number of tags includes both existing tags and those included in your current request.
    TooManyTags(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -3340,7 +3365,7 @@ pub enum DetectEntitiesError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -3393,7 +3418,7 @@ pub enum DetectKeyPhrasesError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -3448,7 +3473,7 @@ pub enum DetectSentimentError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -3501,7 +3526,7 @@ pub enum DetectSyntaxError {
    InvalidRequest(String),
    /// The size of the input text exceeds the limit. Use a smaller document.
    TextSizeLimitExceeded(String),
-    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, Amazon Comprehend accepts only English or Spanish text.
+    /// Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English is accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.
    UnsupportedLanguage(String),
}
@@ -3548,7 +3573,7 @@ impl Error for DetectSyntaxError {
pub enum ListDocumentClassificationJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3609,7 +3634,7 @@ impl Error for ListDocumentClassificationJobsError {
pub enum ListDocumentClassifiersError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3668,7 +3693,7 @@ impl Error for ListDocumentClassifiersError {
pub enum ListDominantLanguageDetectionJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3729,7 +3754,7 @@ impl Error for ListDominantLanguageDetectionJobsError {
pub enum ListEntitiesDetectionJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3788,7 +3813,7 @@ impl Error for ListEntitiesDetectionJobsError {
pub enum ListEntityRecognizersError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3845,7 +3870,7 @@ impl Error for ListEntityRecognizersError {
pub enum ListKeyPhrasesDetectionJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -3906,7 +3931,7 @@ impl Error for ListKeyPhrasesDetectionJobsError {
pub enum ListSentimentDetectionJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -4014,7 +4039,7 @@ impl Error for ListTagsForResourceError {
pub enum ListTopicsDetectionJobsError {
    /// An internal server error occurred. Retry your request.
    InternalServer(String),
-    /// The filter specified for the ListDocumentClassificationJobs operation is invalid. Specify a different filter.
+    /// The filter specified for the operation is invalid. Specify a different filter.
    InvalidFilter(String),
    /// The request is invalid.
    InvalidRequest(String),
@@ -5204,10 +5229,7 @@ impl ComprehendClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> ComprehendClient {
-        ComprehendClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with(
@@ -5221,10 +5243,14 @@ impl ComprehendClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        ComprehendClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> ComprehendClient {
+        ComprehendClient { client, region }
    }
}
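The refactor above funnels both `new` and `new_with` through the new public `new_with_client` constructor, which also lets callers share one `rusoto_core::Client` (credentials provider plus HTTP dispatcher) across several service clients. A sketch of that use, assuming `Client` is cheaply cloneable (it wraps its internals in an `Arc` in rusoto_core):

```rust
use rusoto_comprehend::ComprehendClient;
use rusoto_comprehendmedical::ComprehendMedicalClient;
use rusoto_core::{Client, Region};

fn main() {
    let shared = Client::shared();
    // Both service clients reuse the same underlying connection pool
    // and credentials provider instead of each building their own.
    let comprehend = ComprehendClient::new_with_client(shared.clone(), Region::UsEast1);
    let medical = ComprehendMedicalClient::new_with_client(shared, Region::UsEast1);
    let _ = (comprehend, medical);
}
```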
diff --git a/rusoto/services/comprehendmedical/Cargo.toml b/rusoto/services/comprehendmedical/Cargo.toml
index 75c09bedcdd..914f864e063 100644
--- a/rusoto/services/comprehendmedical/Cargo.toml
+++ b/rusoto/services/comprehendmedical/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
name = "rusoto_comprehendmedical"
readme = "README.md"
repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
homepage = "https://www.rusoto.org/"
edition = "2018"
exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
serde_json = "1.0.1"

[dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
path = "../../core"
default-features = false

[dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
path = "../../../mock"
+default-features = false

[features]
default = ["native-tls"]
native-tls = ["rusoto_core/native-tls"]
rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/comprehendmedical/README.md b/rusoto/services/comprehendmedical/README.md
index 56d6dabd867..4bc065602c0 100644
--- a/rusoto/services/comprehendmedical/README.md
+++ b/rusoto/services/comprehendmedical/README.md
@@ -23,9 +23,16 @@ To use `rusoto_comprehendmedical` in your application, add it as a dependency in
```toml
[dependencies]
-rusoto_comprehendmedical = "0.40.0"
+rusoto_comprehendmedical = "0.41.0"
```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
## Contributing

See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/comprehendmedical/src/custom/mod.rs b/rusoto/services/comprehendmedical/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/comprehendmedical/src/custom/mod.rs
+++ b/rusoto/services/comprehendmedical/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/comprehendmedical/src/generated.rs b/rusoto/services/comprehendmedical/src/generated.rs
index 5b4899113a6..e37c172554d 100644
--- a/rusoto/services/comprehendmedical/src/generated.rs
+++ b/rusoto/services/comprehendmedical/src/generated.rs
@@ -9,24 +9,23 @@
// must be updated to generate the changes.
//
// =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
use futures::future;
use futures::Future;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

use rusoto_core::proto;
use rusoto_core::signature::SignedRequest;
use serde_json;

/// An extracted segment of the text that is an attribute of an entity, or otherwise related to an entity, such as the dosage of a medication taken. It contains information about the attribute such as id, begin and end offset within the input text, and the segment of the input text.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Attribute {
    /// The 0-based character offset in the input text that shows where the attribute begins. The offset returns the UTF-8 code point in the string.
#[serde(rename = "BeginOffset")] @@ -70,7 +69,7 @@ pub struct DetectEntitiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectEntitiesResponse { ///

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Comprehend Medical has in the detection and analysis. Attributes and traits of the entity are also returned.

#[serde(rename = "Entities")] @@ -93,7 +92,7 @@ pub struct DetectPHIRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectPHIResponse { ///

The collection of PHI entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Comprehend Medical has in its detection.

#[serde(rename = "Entities")] @@ -106,7 +105,7 @@ pub struct DetectPHIResponse { ///

Provides information about an extracted medical entity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Entity { ///

The extracted attributes that relate to this entity.

#[serde(rename = "Attributes")] @@ -148,7 +147,7 @@ pub struct Entity { ///

Provides contextual information about the extracted entity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Trait { ///

Provides a name or contextual description about the trait.

#[serde(rename = "Name")] @@ -162,7 +161,7 @@ pub struct Trait { ///

An attribute that we extracted, but were unable to relate to an entity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnmappedAttribute { ///

The specific attribute that has been extracted but not mapped to an entity.

#[serde(rename = "Attribute")] @@ -328,10 +327,7 @@ impl ComprehendMedicalClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ComprehendMedicalClient { - ComprehendMedicalClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -345,10 +341,14 @@ impl ComprehendMedicalClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ComprehendMedicalClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ComprehendMedicalClient { + ComprehendMedicalClient { client, region } } } diff --git a/rusoto/services/config/Cargo.toml b/rusoto/services/config/Cargo.toml index 399582f0089..90202e93872 100644 --- a/rusoto/services/config/Cargo.toml +++ b/rusoto/services/config/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_config" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/config/README.md b/rusoto/services/config/README.md index cdb5c6705f2..6e27eeccddd 100644 --- a/rusoto/services/config/README.md +++ b/rusoto/services/config/README.md @@ -23,9 +23,16 @@ To use `rusoto_config` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_config = "0.40.0" +rusoto_config = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/config/src/custom/mod.rs b/rusoto/services/config/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/config/src/custom/mod.rs +++ b/rusoto/services/config/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/config/src/generated.rs b/rusoto/services/config/src/generated.rs index ce1064be6dc..319cc044317 100644 --- a/rusoto/services/config/src/generated.rs +++ b/rusoto/services/config/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -42,7 +41,7 @@ pub struct AccountAggregationSource { ///

/// Indicates whether an AWS Config rule is compliant based on account ID, region, compliance, and rule name.
///
/// A rule is compliant if all of the resources that the rule evaluated comply with it. It is noncompliant if any of these resources do not comply.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AggregateComplianceByConfigRule {
    /// The 12-digit account ID of the source account.
    #[serde(rename = "AccountId")]
@@ -64,7 +63,7 @@ pub struct AggregateComplianceByConfigRule {
/// Returns the number of compliant and noncompliant rules for one or more accounts and regions in an aggregator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AggregateComplianceCount {
    /// The number of compliant and noncompliant AWS Config rules.
    #[serde(rename = "ComplianceSummary")]
@@ -78,7 +77,7 @@ pub struct AggregateComplianceCount {
/// The details of an AWS Config evaluation for an account ID and region in an aggregator. Provides the AWS resource that was evaluated, the compliance of the resource, related time stamps, and supplementary information.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AggregateEvaluationResult {
    /// The 12-digit account ID of the source account.
    #[serde(rename = "AccountId")]
@@ -133,7 +132,7 @@ pub struct AggregateResourceIdentifier {
/// The current sync status between the source and the aggregator account.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AggregatedSourceStatus {
    /// The region authorized to collect aggregated data.
    #[serde(rename = "AwsRegion")]
@@ -167,7 +166,7 @@ pub struct AggregatedSourceStatus {
/// An object that represents the authorizations granted to aggregator accounts and regions.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AggregationAuthorization {
    /// The Amazon Resource Name (ARN) of the aggregation object.
    #[serde(rename = "AggregationAuthorizationArn")]
@@ -189,7 +188,7 @@ pub struct AggregationAuthorization {
/// The detailed configuration of a specified resource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct BaseConfigurationItem {
    /// The 12-digit AWS account ID associated with the resource.
    #[serde(rename = "accountId")]
@@ -260,7 +259,7 @@ pub struct BatchGetAggregateResourceConfigRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct BatchGetAggregateResourceConfigResponse {
    /// A list that contains the current configuration of one or more resources.
    #[serde(rename = "BaseConfigurationItems")]
@@ -280,7 +279,7 @@ pub struct BatchGetResourceConfigRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct BatchGetResourceConfigResponse {
    /// A list that contains the current configuration of one or more resources.
    #[serde(rename = "baseConfigurationItems")]
@@ -294,7 +293,7 @@ pub struct BatchGetResourceConfigResponse {
/// Indicates whether an AWS resource or AWS Config rule is compliant and provides the number of contributors that affect the compliance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Compliance {
    /// The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, up to a maximum number.
    #[serde(rename = "ComplianceContributorCount")]
@@ -308,7 +307,7 @@ pub struct Compliance {
/// Indicates whether an AWS Config rule is compliant. A rule is compliant if all of the resources that the rule evaluated comply with it. A rule is noncompliant if any of these resources do not comply.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ComplianceByConfigRule {
    /// Indicates whether the AWS Config rule is compliant.
    #[serde(rename = "Compliance")]
@@ -322,7 +321,7 @@ pub struct ComplianceByConfigRule {
/// Indicates whether an AWS resource that is evaluated according to one or more AWS Config rules is compliant. A resource is compliant if it complies with all of the rules that evaluate it. A resource is noncompliant if it does not comply with one or more of these rules.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ComplianceByResource {
    /// Indicates whether the AWS resource complies with all of the AWS Config rules that evaluated it.
    #[serde(rename = "Compliance")]
@@ -340,7 +339,7 @@ pub struct ComplianceByResource {
/// The number of AWS resources or AWS Config rules responsible for the current compliance of the item, up to a maximum number.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ComplianceContributorCount {
    /// Indicates whether the maximum count is reached.
    #[serde(rename = "CapExceeded")]
@@ -354,7 +353,7 @@ pub struct ComplianceContributorCount {
/// The number of AWS Config rules or AWS resources that are compliant and noncompliant.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ComplianceSummary {
    /// The time that AWS Config created the compliance summary.
    #[serde(rename = "ComplianceSummaryTimestamp")]
@@ -372,7 +371,7 @@ pub struct ComplianceSummary {
/// The number of AWS resources of a specific type that are compliant or noncompliant, up to a maximum of 100 for each.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ComplianceSummaryByResourceType {
    /// The number of AWS resources that are compliant or noncompliant, up to a maximum of 100 for each.
    #[serde(rename = "ComplianceSummary")]
@@ -386,7 +385,7 @@ pub struct ComplianceSummaryByResourceType {
/// Provides status of the delivery of the snapshot or the configuration history to the specified Amazon S3 bucket. Also provides the status of notifications about the Amazon S3 delivery to the specified Amazon SNS topic.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigExportDeliveryInfo {
    /// The time of the last attempted delivery.
    #[serde(rename = "lastAttemptTime")]
@@ -494,7 +493,7 @@ pub struct ConfigRuleComplianceSummaryFilters {
/// Status information for your AWS managed Config rules. The status includes information such as the last time the rule ran, the last time it failed, and the related error for the last failure.
///
/// This action does not return status information about custom AWS Config rules.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigRuleEvaluationStatus {
    /// The Amazon Resource Name (ARN) of the AWS Config rule.
    #[serde(rename = "ConfigRuleArn")]
@@ -553,7 +552,7 @@ pub struct ConfigSnapshotDeliveryProperties {
/// A list that contains the status of the delivery of the configuration stream notification to the Amazon SNS topic.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigStreamDeliveryInfo {
    /// The error code from the last attempted delivery.
    #[serde(rename = "lastErrorCode")]
@@ -575,7 +574,7 @@ pub struct ConfigStreamDeliveryInfo {
/// The details about the configuration aggregator, including information about source accounts, regions, and metadata of the aggregator.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigurationAggregator {
    /// Provides a list of source accounts and regions to be aggregated.
    #[serde(rename = "AccountAggregationSources")]
@@ -605,7 +604,7 @@ pub struct ConfigurationAggregator {
/// A list that contains detailed configurations of a specified resource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigurationItem {
    /// The 12-digit AWS account ID associated with the resource.
    #[serde(rename = "accountId")]
@@ -700,7 +699,7 @@ pub struct ConfigurationRecorder {
/// The current status of the configuration recorder.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConfigurationRecorderStatus {
    /// The error code indicating that the recording failed.
    #[serde(rename = "lastErrorCode")]
@@ -787,9 +786,16 @@ pub struct DeleteEvaluationResultsRequest {
/// The output when you delete the evaluation results for the specified AWS Config rule.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteEvaluationResultsResponse {}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteOrganizationConfigRuleRequest {
+    /// The name of the organization config rule that you want to delete.
+    #[serde(rename = "OrganizationConfigRuleName")]
+    pub organization_config_rule_name: String,
+}
+
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct DeletePendingAggregationRequestRequest {
    /// The 12-digit account ID of the account requesting to aggregate data.
@@ -812,9 +818,28 @@ pub struct DeleteRemediationConfigurationRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteRemediationConfigurationResponse {}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteRemediationExceptionsRequest {
+    /// The name of the AWS Config rule for which you want to delete remediation exception configuration.
+    #[serde(rename = "ConfigRuleName")]
+    pub config_rule_name: String,
+    /// An exception list of resource exception keys to be processed with the current request. AWS Config adds an exception for each resource key. For example, AWS Config adds 3 exceptions for 3 resource keys.
+    #[serde(rename = "ResourceKeys")]
+    pub resource_keys: Vec<RemediationExceptionResourceKey>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteRemediationExceptionsResponse {
+    /// Returns a list of failed delete remediation exceptions batch objects. Each object in the batch consists of a list of failed items and failure messages.
+    #[serde(rename = "FailedBatches")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failed_batches: Option<Vec<FailedDeleteRemediationExceptionsBatch>>,
+}

+
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct DeleteRetentionConfigurationRequest {
    /// The name of the retention configuration to delete.
@@ -832,7 +857,7 @@ pub struct DeliverConfigSnapshotRequest {
/// The output for the DeliverConfigSnapshot action, in JSON format.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeliverConfigSnapshotResponse {
    /// The ID of the snapshot that is being created.
    #[serde(rename = "configSnapshotId")]
@@ -867,7 +892,7 @@ pub struct DeliveryChannel {
/// The status of a specified delivery channel.
///
/// Valid values: Success | Failure
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeliveryChannelStatus {
    /// A list that contains the status of the delivery of the configuration history to the specified Amazon S3 bucket.
    #[serde(rename = "configHistoryDeliveryInfo")]
@@ -900,20 +925,20 @@ pub struct DescribeAggregateComplianceByConfigRulesRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAggregateComplianceByConfigRulesResponse {
    /// Returns a list of AggregateComplianceByConfigRule objects.
    #[serde(rename = "AggregateComplianceByConfigRules")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aggregate_compliance_by_config_rules: Option<Vec<AggregateComplianceByConfigRule>>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -925,20 +950,20 @@ pub struct DescribeAggregationAuthorizationsRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAggregationAuthorizationsResponse {
    /// Returns a list of authorizations granted to various aggregator accounts and regions.
    #[serde(rename = "AggregationAuthorizations")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aggregation_authorizations: Option<Vec<AggregationAuthorization>>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -963,7 +988,7 @@ pub struct DescribeComplianceByConfigRuleRequest {
///
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeComplianceByConfigRuleResponse {
    /// Indicates whether each of the specified AWS Config rules is compliant.
    #[serde(rename = "ComplianceByConfigRules")]
@@ -1002,7 +1027,7 @@ pub struct DescribeComplianceByResourceRequest {
///
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeComplianceByResourceResponse {
    /// Indicates whether the specified AWS resource complies with all of the AWS Config rules that evaluate it.
    #[serde(rename = "ComplianceByResources")]
@@ -1033,7 +1058,7 @@ pub struct DescribeConfigRuleEvaluationStatusRequest {
///
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigRuleEvaluationStatusResponse {
    /// Status information about your AWS managed Config rules.
    #[serde(rename = "ConfigRulesEvaluationStatus")]
@@ -1060,7 +1085,7 @@ pub struct DescribeConfigRulesRequest {
///
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigRulesResponse {
    /// The details about your AWS Config rules.
    #[serde(rename = "ConfigRules")]
@@ -1081,7 +1106,7 @@ pub struct DescribeConfigurationAggregatorSourcesStatusRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1092,13 +1117,13 @@ pub struct DescribeConfigurationAggregatorSourcesStatusRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigurationAggregatorSourcesStatusResponse {
    /// Returns an AggregatedSourceStatus object.
    #[serde(rename = "AggregatedSourceStatusList")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aggregated_source_status_list: Option<Vec<AggregatedSourceStatus>>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1114,20 +1139,20 @@ pub struct DescribeConfigurationAggregatorsRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigurationAggregatorsResponse {
    /// Returns a ConfigurationAggregators object.
    #[serde(rename = "ConfigurationAggregators")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub configuration_aggregators: Option<Vec<ConfigurationAggregator>>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1144,7 +1169,7 @@ pub struct DescribeConfigurationRecorderStatusRequest {
/// The output for the DescribeConfigurationRecorderStatus action, in JSON format.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigurationRecorderStatusResponse {
    /// A list that contains status of the specified recorders.
    #[serde(rename = "ConfigurationRecordersStatus")]
@@ -1163,7 +1188,7 @@ pub struct DescribeConfigurationRecordersRequest {
/// The output for the DescribeConfigurationRecorders action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeConfigurationRecordersResponse {
    /// A list that contains the descriptions of the specified configuration recorders.
    #[serde(rename = "ConfigurationRecorders")]
@@ -1182,7 +1207,7 @@ pub struct DescribeDeliveryChannelStatusRequest {
/// The output for the DescribeDeliveryChannelStatus action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDeliveryChannelStatusResponse {
    /// A list that contains the status of a specified delivery channel.
    #[serde(rename = "DeliveryChannelsStatus")]
@@ -1201,7 +1226,7 @@ pub struct DescribeDeliveryChannelsRequest {
/// The output for the DescribeDeliveryChannels action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDeliveryChannelsResponse {
    /// A list that contains the descriptions of the specified delivery channel.
#[serde(rename = "DeliveryChannels")] @@ -1209,22 +1234,80 @@ pub struct DescribeDeliveryChannelsResponse { pub delivery_channels: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeOrganizationConfigRuleStatusesRequest { + ///

The maximum number of OrganizationConfigRuleStatuses returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The names of organization config rules for which you want status details. If you do not specify any names, AWS Config returns details for all your organization AWS Confg rules.

+ #[serde(rename = "OrganizationConfigRuleNames")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rule_names: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeOrganizationConfigRuleStatusesResponse { + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A list of OrganizationConfigRuleStatus objects.

+ #[serde(rename = "OrganizationConfigRuleStatuses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rule_statuses: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeOrganizationConfigRulesRequest { + ///

The maximum number of organization config rules returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The names of organization config rules for which you want details. If you do not specify any names, AWS Config returns details for all your organization config rules.

+ #[serde(rename = "OrganizationConfigRuleNames")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rule_names: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeOrganizationConfigRulesResponse { + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

Retuns a list OrganizationConfigRule objects.

+ #[serde(rename = "OrganizationConfigRules")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rules: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribePendingAggregationRequestsRequest { ///

+
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct DescribePendingAggregationRequestsRequest {
    /// The maximum number of evaluation results returned on each page. The default is maximum. If you specify 0, AWS Config uses the default.
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribePendingAggregationRequestsResponse {
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1242,7 +1325,7 @@ pub struct DescribeRemediationConfigurationsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeRemediationConfigurationsResponse {
    /// Returns a remediation configuration object.
    #[serde(rename = "RemediationConfigurations")]
@@ -1250,6 +1333,38 @@ pub struct DescribeRemediationConfigurationsResponse {
    pub remediation_configurations: Option<Vec<RemediationConfiguration>>,
}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DescribeRemediationExceptionsRequest {
+    /// The name of the AWS Config rule.
+    #[serde(rename = "ConfigRuleName")]
+    pub config_rule_name: String,
+    /// The maximum number of RemediationExceptionResourceKey returned on each page. The default is 25. If you specify 0, AWS Config uses the default.
+    #[serde(rename = "Limit")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub limit: Option<i64>,
+    /// The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// An exception list of resource exception keys to be processed with the current request. AWS Config adds an exception for each resource key. For example, AWS Config adds 3 exceptions for 3 resource keys.
+    #[serde(rename = "ResourceKeys")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_keys: Option<Vec<RemediationExceptionResourceKey>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DescribeRemediationExceptionsResponse {
+    /// The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// Returns a list of remediation exception objects.
+    #[serde(rename = "RemediationExceptions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub remediation_exceptions: Option<Vec<RemediationException>>,
+}
+
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct DescribeRemediationExecutionStatusRequest {
    /// A list of AWS Config rule names.
@@ -1270,7 +1385,7 @@ pub struct DescribeRemediationExecutionStatusRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeRemediationExecutionStatusResponse {
    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
@@ -1295,7 +1410,7 @@ pub struct DescribeRetentionConfigurationsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeRetentionConfigurationsResponse {
    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
@@ -1330,7 +1445,7 @@ pub struct Evaluation {
/// The details of an AWS Config evaluation. Provides the AWS resource that was evaluated, the compliance of the resource, related time stamps, and supplementary information.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct EvaluationResult {
    /// Supplementary information about how the evaluation determined the compliance.
    #[serde(rename = "Annotation")]
@@ -1360,7 +1475,7 @@ pub struct EvaluationResult {
/// Uniquely identifies an evaluation result.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct EvaluationResultIdentifier {
    /// Identifies an AWS Config rule used to evaluate an AWS resource, and provides the type and ID of the evaluated resource.
    #[serde(rename = "EvaluationResultQualifier")]
@@ -1374,7 +1489,7 @@ pub struct EvaluationResultIdentifier {
/// Identifies an AWS Config rule that evaluated an AWS resource, and provides the type and ID of the resource that the rule evaluated.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct EvaluationResultQualifier {
    /// The name of the AWS Config rule that was used in the evaluation.
    #[serde(rename = "ConfigRuleName")]
@@ -1390,9 +1505,32 @@ pub struct EvaluationResultQualifier {
    pub resource_type: Option<String>,
}
+/// The controls that AWS Config uses for executing remediations.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ExecutionControls {
+    /// A SsmControls object.
+    #[serde(rename = "SsmControls")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ssm_controls: Option<SsmControls>,
+}
+
+/// List of each of the failed delete remediation exceptions with specific reasons.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct FailedDeleteRemediationExceptionsBatch {
+    /// Returns remediation exception resource key object of the failed items.
+    #[serde(rename = "FailedItems")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failed_items: Option<Vec<RemediationExceptionResourceKey>>,
+    /// Returns a failure message for delete remediation exception. For example, AWS Config creates an exception due to an internal error.
+    #[serde(rename = "FailureMessage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failure_message: Option<String>,
+}
+
/// List of each of the failed remediations with specific reasons.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct FailedRemediationBatch {
    /// Returns remediation configurations of the failed items.
    #[serde(rename = "FailedItems")]
@@ -1404,9 +1542,23 @@ pub struct FailedRemediationBatch {
    pub failure_message: Option<String>,
}

+/// List of each of the failed remediation exceptions with specific reasons.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct FailedRemediationExceptionBatch {
+    /// Returns remediation exception resource key object of the failed items.
+    #[serde(rename = "FailedItems")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failed_items: Option<Vec<RemediationException>>,
+    /// Returns a failure message. For example, the auto-remediation has failed.
+    #[serde(rename = "FailureMessage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failure_message: Option<String>,
+}
+
/// Details about the fields such as name of the field.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct FieldInfo {
    /// Name of the field.
    #[serde(rename = "Name")]
@@ -1436,20 +1588,20 @@ pub struct GetAggregateComplianceDetailsByConfigRuleRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetAggregateComplianceDetailsByConfigRuleResponse {
    /// Returns an AggregateEvaluationResults object.
    #[serde(rename = "AggregateEvaluationResults")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aggregate_evaluation_results: Option<Vec<AggregateEvaluationResult>>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1472,14 +1624,14 @@ pub struct GetAggregateConfigRuleComplianceSummaryRequest {
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetAggregateConfigRuleComplianceSummaryResponse {
    /// Returns a list of AggregateComplianceCount objects.
    #[serde(rename = "AggregateComplianceCounts")]
@@ -1489,7 +1641,7 @@ pub struct GetAggregateConfigRuleComplianceSummaryResponse {
    #[serde(rename = "GroupByKey")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_by_key: Option<String>,
-    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
+    /// The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
@@ -1519,7 +1671,7 @@ pub struct GetAggregateDiscoveredResourceCountsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetAggregateDiscoveredResourceCountsResponse {
    /// The key passed into the request object. If GroupByKey is not provided, the result will be empty.
    #[serde(rename = "GroupByKey")]
@@ -1549,7 +1701,7 @@ pub struct GetAggregateResourceConfigRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetAggregateResourceConfigResponse {
    /// Returns a ConfigurationItem object.
    #[serde(rename = "ConfigurationItem")]
@@ -1579,7 +1731,7 @@ pub struct GetComplianceDetailsByConfigRuleRequest {
///
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetComplianceDetailsByConfigRuleResponse {
    ///

Indicates whether the AWS resource complies with the specified AWS Config rule.

#[serde(rename = "EvaluationResults")] @@ -1612,7 +1764,7 @@ pub struct GetComplianceDetailsByResourceRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetComplianceDetailsByResourceResponse { ///

Indicates whether the specified AWS resource complies with each AWS Config rule.

#[serde(rename = "EvaluationResults")] @@ -1626,7 +1778,7 @@ pub struct GetComplianceDetailsByResourceResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetComplianceSummaryByConfigRuleResponse { ///

The number of AWS Config rules that are compliant and the number that are noncompliant, up to a maximum of 25 for each.

#[serde(rename = "ComplianceSummary")] @@ -1645,7 +1797,7 @@ pub struct GetComplianceSummaryByResourceTypeRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetComplianceSummaryByResourceTypeResponse { ///

The number of resources that are compliant and the number that are noncompliant. If one or more resource types were provided with the request, the numbers are returned for each resource type. The maximum number returned is 100.

#[serde(rename = "ComplianceSummariesByResourceType")] @@ -1670,7 +1822,7 @@ pub struct GetDiscoveredResourceCountsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDiscoveredResourceCountsResponse { ///

The string that you use in a subsequent request to get the next page of results in a paginated response.

#[serde(rename = "nextToken")] @@ -1686,6 +1838,38 @@ pub struct GetDiscoveredResourceCountsResponse { pub total_discovered_resources: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetOrganizationConfigRuleDetailedStatusRequest { + ///

A StatusDetailFilters object.

+ #[serde(rename = "Filters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filters: Option, + ///

The maximum number of OrganizationConfigRuleDetailedStatus objects returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The name of the organization config rule for which you want status details for member accounts.

+ #[serde(rename = "OrganizationConfigRuleName")] + pub organization_config_rule_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetOrganizationConfigRuleDetailedStatusResponse { + ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A list of MemberAccountStatus objects.

+ #[serde(rename = "OrganizationConfigRuleDetailedStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rule_detailed_status: Option>, +} + ///

The input for the GetResourceConfigHistory action.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetResourceConfigHistoryRequest { @@ -1719,7 +1903,7 @@ pub struct GetResourceConfigHistoryRequest { ///

The output for the GetResourceConfigHistory action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourceConfigHistoryResponse { ///

A list that contains the configuration history of one or more resources.

#[serde(rename = "configurationItems")] @@ -1733,7 +1917,7 @@ pub struct GetResourceConfigHistoryResponse { ///

The count of resources that are grouped by the group name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupedResourceCount { ///

The name of the group that can be region, account ID, or resource type. For example, region1, region2 if the region was chosen as GroupByKey.

#[serde(rename = "GroupName")] @@ -1766,7 +1950,7 @@ pub struct ListAggregateDiscoveredResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAggregateDiscoveredResourcesResponse { ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

#[serde(rename = "NextToken")] @@ -1808,7 +1992,7 @@ pub struct ListDiscoveredResourcesRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDiscoveredResourcesResponse { ///

The string that you use in a subsequent request to get the next page of results in a paginated response.

#[serde(rename = "nextToken")] @@ -1826,7 +2010,7 @@ pub struct ListTagsForResourceRequest { #[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] pub limit: Option, - ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, @@ -1836,9 +2020,9 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { - ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

+ ///

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, @@ -1848,6 +2032,33 @@ pub struct ListTagsForResourceResponse { pub tags: Option>, } +///

The creation or deletion status of an organization config rule in each member account, including the name of the rule, the status, and the error code and error message if creation or deletion failed.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MemberAccountStatus { + ///

The 12-digit account ID of a member account.

+ #[serde(rename = "AccountId")] + pub account_id: String, + ///

The name of the config rule deployed in the member account.

+ #[serde(rename = "ConfigRuleName")] + pub config_rule_name: String, + ///

An error code that is returned when config rule creation or deletion failed in the member account.

+ #[serde(rename = "ErrorCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_code: Option, + ///

An error message indicating that config rule creation or deletion has failed due to an error in the member account.

+ #[serde(rename = "ErrorMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, + ///

The timestamp of the last status update.

+ #[serde(rename = "LastUpdateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_update_time: Option, + ///

Indicates the deployment status of the config rule in the member account. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in the member account. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in the member account. The config rule status is deleted when the master account deletes OrganizationConfigRule and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

  • CREATE_SUCCESSFUL when config rule has been created in the member account.

  • CREATE_IN_PROGRESS when config rule is being created in the member account.

  • CREATE_FAILED when config rule creation has failed in the member account.

  • DELETE_FAILED when config rule deletion has failed in the member account.

  • DELETE_IN_PROGRESS when config rule is being deleted in the member account.

  • DELETE_SUCCESSFUL when config rule has been deleted in the member account.

  • UPDATE_SUCCESSFUL when config rule has been updated in the member account.

  • UPDATE_IN_PROGRESS when config rule is being updated in the member account.

  • UPDATE_FAILED when config rule update has failed in the member account.

+ #[serde(rename = "MemberAccountRuleStatus")] + pub member_account_rule_status: String, +} + ///

This object contains regions to set up the aggregator and an IAM role to retrieve organization details.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct OrganizationAggregationSource { @@ -1864,9 +2075,136 @@ pub struct OrganizationAggregationSource { pub role_arn: String, } +///

An organization config rule that has information about config rules that AWS Config creates in member accounts.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct OrganizationConfigRule { + ///

A comma-separated list of accounts excluded from the organization config rule.

+ #[serde(rename = "ExcludedAccounts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub excluded_accounts: Option>, + ///

The timestamp of the last update.

+ #[serde(rename = "LastUpdateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_update_time: Option, + ///

The Amazon Resource Name (ARN) of the organization config rule.

+ #[serde(rename = "OrganizationConfigRuleArn")] + pub organization_config_rule_arn: String, + ///

The name that you assign to the organization config rule.

+ #[serde(rename = "OrganizationConfigRuleName")] + pub organization_config_rule_name: String, + ///

An OrganizationCustomRuleMetadata object.

+ #[serde(rename = "OrganizationCustomRuleMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_custom_rule_metadata: Option, + ///

An OrganizationManagedRuleMetadata object.

+ #[serde(rename = "OrganizationManagedRuleMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_managed_rule_metadata: Option, +} + +///

Returns the status for an organization config rule in an organization.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct OrganizationConfigRuleStatus { + ///

An error code that is returned when organization config rule creation or deletion has failed.

+ #[serde(rename = "ErrorCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_code: Option, + ///

An error message indicating that organization config rule creation or deletion failed due to an error.

+ #[serde(rename = "ErrorMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, + ///

The timestamp of the last update.

+ #[serde(rename = "LastUpdateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_update_time: Option, + ///

The name that you assign to the organization config rule.

+ #[serde(rename = "OrganizationConfigRuleName")] + pub organization_config_rule_name: String, + ///

Indicates the deployment status of an organization config rule. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in all the member accounts. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in all the member accounts. Additionally, the config rule status is updated when one or more member accounts join or leave an organization. The config rule status is deleted when the master account deletes OrganizationConfigRule in all the member accounts and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

  • CREATE_SUCCESSFUL when an organization config rule has been successfully created in all the member accounts.

  • CREATE_IN_PROGRESS when an organization config rule creation is in progress.

  • CREATE_FAILED when an organization config rule creation failed in one or more member accounts within that organization.

  • DELETE_FAILED when an organization config rule deletion failed in one or more member accounts within that organization.

  • DELETE_IN_PROGRESS when an organization config rule deletion is in progress.

  • DELETE_SUCCESSFUL when an organization config rule has been successfully deleted from all the member accounts.

  • UPDATE_SUCCESSFUL when an organization config rule has been successfully updated in all the member accounts.

  • UPDATE_IN_PROGRESS when an organization config rule update is in progress.

  • UPDATE_FAILED when an organization config rule update failed in one or more member accounts within that organization.

+ #[serde(rename = "OrganizationRuleStatus")] + pub organization_rule_status: String, +} + +///

An object that specifies organization custom rule metadata such as the resource type, the resource ID of the AWS resource, the Lambda function ARN, and the organization trigger types that trigger AWS Config to evaluate your AWS resources against a rule. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct OrganizationCustomRuleMetadata { + ///

The description that you provide for the organization config rule.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

A string, in JSON format, that is passed to the organization config rule's Lambda function.

+ #[serde(rename = "InputParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub input_parameters: Option, + ///

The Lambda function ARN.

+ #[serde(rename = "LambdaFunctionArn")] + pub lambda_function_arn: String, + ///

The maximum frequency with which AWS Config runs evaluations for a rule. Your custom rule is triggered when AWS Config delivers the configuration snapshot. For more information, see ConfigSnapshotDeliveryProperties.

By default, rules with a periodic trigger are evaluated every 24 hours. To change the frequency, specify a valid value for the MaximumExecutionFrequency parameter.

+ #[serde(rename = "MaximumExecutionFrequency")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_execution_frequency: Option, + ///

The type of notification that triggers AWS Config to run an evaluation for a rule. You can specify the following notification types:

  • ConfigurationItemChangeNotification - Triggers an evaluation when AWS Config delivers a configuration item as a result of a resource change.

  • OversizedConfigurationItemChangeNotification - Triggers an evaluation when AWS Config delivers an oversized configuration item. AWS Config may generate this notification type when a resource changes and the notification exceeds the maximum size allowed by Amazon SNS.

  • ScheduledNotification - Triggers a periodic evaluation at the frequency specified for MaximumExecutionFrequency.

+ #[serde(rename = "OrganizationConfigRuleTriggerTypes")] + pub organization_config_rule_trigger_types: Vec, + ///

The ID of the AWS resource that was evaluated.

+ #[serde(rename = "ResourceIdScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_id_scope: Option, + ///

The type of the AWS resource that was evaluated.

+ #[serde(rename = "ResourceTypesScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_types_scope: Option>, + ///

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

+ #[serde(rename = "TagKeyScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_key_scope: Option, + ///

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

+ #[serde(rename = "TagValueScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_value_scope: Option, +} + +///

An object that specifies organization managed rule metadata such as resource type and ID of AWS resource along with the rule identifier. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct OrganizationManagedRuleMetadata { + ///

The description that you provide for the organization config rule.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

A string, in JSON format, that is passed to the organization config rule's Lambda function.

+ #[serde(rename = "InputParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub input_parameters: Option, + ///

The maximum frequency with which AWS Config runs evaluations for a rule. You are using an AWS managed rule that is triggered at a periodic frequency.

By default, rules with a periodic trigger are evaluated every 24 hours. To change the frequency, specify a valid value for the MaximumExecutionFrequency parameter.

+ #[serde(rename = "MaximumExecutionFrequency")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_execution_frequency: Option, + ///

The ID of the AWS resource that was evaluated.

+ #[serde(rename = "ResourceIdScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_id_scope: Option, + ///

The type of the AWS resource that was evaluated.

+ #[serde(rename = "ResourceTypesScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_types_scope: Option>, + ///

For organization config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.

+ #[serde(rename = "RuleIdentifier")] + pub rule_identifier: String, + ///

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

+ #[serde(rename = "TagKeyScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_key_scope: Option, + ///

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

+ #[serde(rename = "TagValueScope")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_value_scope: Option, +} + ///

An object that represents the account ID and region of an aggregator account that is requesting authorization but is not yet authorized.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PendingAggregationRequest { ///

The 12-digit account ID of the account requesting to aggregate data.

#[serde(rename = "RequesterAccountId")] @@ -1886,13 +2224,14 @@ pub struct PutAggregationAuthorizationRequest { ///

The region authorized to collect aggregated data.

#[serde(rename = "AuthorizedAwsRegion")] pub authorized_aws_region: String, + ///

An array of tag objects.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutAggregationAuthorizationResponse { ///

Returns an AggregationAuthorization object.

#[serde(rename = "AggregationAuthorization")] @@ -1905,6 +2244,7 @@ pub struct PutConfigRuleRequest { ///

The rule that you want to add to your account.

#[serde(rename = "ConfigRule")] pub config_rule: ConfigRule, + ///

An array of tag objects.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1923,13 +2263,14 @@ pub struct PutConfigurationAggregatorRequest { #[serde(rename = "OrganizationAggregationSource")] #[serde(skip_serializing_if = "Option::is_none")] pub organization_aggregation_source: Option, + ///

An array of tag objects.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutConfigurationAggregatorResponse { ///

Returns a ConfigurationAggregator object.

#[serde(rename = "ConfigurationAggregator")] @@ -1971,7 +2312,7 @@ pub struct PutEvaluationsRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEvaluationsResponse { ///

Requests that failed because of a client or server error.

#[serde(rename = "FailedEvaluations")] @@ -1979,6 +2320,34 @@ pub struct PutEvaluationsResponse { pub failed_evaluations: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutOrganizationConfigRuleRequest { + ///

A comma-separated list of accounts that you want to exclude from an organization config rule.

+ #[serde(rename = "ExcludedAccounts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub excluded_accounts: Option>, + ///

The name that you assign to an organization config rule.

+ #[serde(rename = "OrganizationConfigRuleName")] + pub organization_config_rule_name: String, + ///

An OrganizationCustomRuleMetadata object.

+ #[serde(rename = "OrganizationCustomRuleMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_custom_rule_metadata: Option, + ///

An OrganizationManagedRuleMetadata object.

+ #[serde(rename = "OrganizationManagedRuleMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_managed_rule_metadata: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutOrganizationConfigRuleResponse { + ///

The Amazon Resource Name (ARN) of an organization config rule.

+ #[serde(rename = "OrganizationConfigRuleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_config_rule_arn: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutRemediationConfigurationsRequest { ///

A list of remediation configuration objects.

@@ -1987,7 +2356,7 @@ pub struct PutRemediationConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRemediationConfigurationsResponse { ///

Returns a list of failed remediation batch objects.

#[serde(rename = "FailedBatches")] @@ -1995,6 +2364,33 @@ pub struct PutRemediationConfigurationsResponse { pub failed_batches: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutRemediationExceptionsRequest { + ///

The name of the AWS Config rule for which you want to create a remediation exception.

+ #[serde(rename = "ConfigRuleName")] + pub config_rule_name: String, + ///

The exception is automatically deleted after the expiration date.

+ #[serde(rename = "ExpirationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub expiration_time: Option, + ///

The message contains an explanation of the exception.

+ #[serde(rename = "Message")] + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + ///

An exception list of resource exception keys to be processed with the current request. AWS Config adds an exception for each resource key. For example, AWS Config adds 3 exceptions for 3 resource keys.

+ #[serde(rename = "ResourceKeys")] + pub resource_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutRemediationExceptionsResponse { + ///

Returns a list of failed remediation exceptions batch objects. Each object in the batch consists of a list of failed items and failure messages.

+ #[serde(rename = "FailedBatches")] + #[serde(skip_serializing_if = "Option::is_none")] + pub failed_batches: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutRetentionConfigurationRequest { ///

Number of days AWS Config stores your historical information.

Currently, only applicable to the configuration item history.

@@ -2003,7 +2399,7 @@ pub struct PutRetentionConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRetentionConfigurationResponse { ///

Returns a retention configuration object.

#[serde(rename = "RetentionConfiguration")] @@ -2013,7 +2409,7 @@ pub struct PutRetentionConfigurationResponse { ///

Details about the query.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryInfo { ///

Returns a FieldInfo object.

#[serde(rename = "SelectFields")] @@ -2040,7 +2436,7 @@ pub struct RecordingGroup { ///

The relationship of the related resource to the main resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Relationship { ///

The type of relationship with the related resource.

#[serde(rename = "relationshipName")] @@ -2063,9 +2459,29 @@ pub struct Relationship { ///

An object that represents the details about the remediation configuration that includes the remediation action, parameters, and data to execute the action.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RemediationConfiguration { + ///

Amazon Resource Name (ARN) of remediation configuration.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The remediation is triggered automatically.

+ #[serde(rename = "Automatic")] + #[serde(skip_serializing_if = "Option::is_none")] + pub automatic: Option, ///

The name of the AWS Config rule.

#[serde(rename = "ConfigRuleName")] pub config_rule_name: String, + ///

Name of the service that owns the service linked rule, if applicable.

+ #[serde(rename = "CreatedByService")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_by_service: Option, + ///

An ExecutionControls object.

+ #[serde(rename = "ExecutionControls")] + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_controls: Option, + ///

The maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5.

For example, if you specify MaximumAutomaticAttempts as 5 with RetryAttemptSeconds as 50 seconds, AWS Config throws an exception after the 5th failed attempt within 50 seconds.

+ #[serde(rename = "MaximumAutomaticAttempts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_automatic_attempts: Option, ///

An object of the RemediationParameterValue.

#[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2074,6 +2490,10 @@ pub struct RemediationConfiguration { #[serde(rename = "ResourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_type: Option, + ///

Maximum time in seconds that AWS Config runs auto-remediation. If you do not select a number, the default is 60 seconds.

For example, if you specify RetryAttemptSeconds as 50 seconds and MaximumAutomaticAttempts as 5, AWS Config will run auto-remediations 5 times within 50 seconds before throwing an exception.

+ #[serde(rename = "RetryAttemptSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub retry_attempt_seconds: Option, ///

Target ID is the name of the public document.

#[serde(rename = "TargetId")] pub target_id: String, @@ -2086,26 +2506,62 @@ pub struct RemediationConfiguration { pub target_version: Option, } -///

Provides details of the current status of the invoked remediation action for that resource.

+///

An object that represents the details about the remediation exception. The details include the rule name, an explanation of the exception, the time when the exception will be deleted, the resource ID, and the resource type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] -pub struct RemediationExecutionStatus { - ///

Start time when the remediation was executed.

- #[serde(rename = "InvocationTime")] - #[serde(skip_serializing_if = "Option::is_none")] - pub invocation_time: Option, - ///

The time when the remediation execution was last updated.

- #[serde(rename = "LastUpdatedTime")] - #[serde(skip_serializing_if = "Option::is_none")] - pub last_updated_time: Option, - #[serde(rename = "ResourceKey")] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RemediationException { + ///

The name of the AWS Config rule.

+ #[serde(rename = "ConfigRuleName")] + pub config_rule_name: String, + ///

The time when the remediation exception will be deleted.

+ #[serde(rename = "ExpirationTime")] #[serde(skip_serializing_if = "Option::is_none")] - pub resource_key: Option, - ///

ENUM of the values.

- #[serde(rename = "State")] + pub expiration_time: Option, + ///

An explanation of a remediation exception.

+ #[serde(rename = "Message")] #[serde(skip_serializing_if = "Option::is_none")] - pub state: Option, - ///

Details of every step.

+ pub message: Option, + ///

The ID of the resource (for example, sg-xxxxxx).

+ #[serde(rename = "ResourceId")] + pub resource_id: String, + ///

The type of a resource.

+ #[serde(rename = "ResourceType")] + pub resource_type: String, +} + +///

The details that identify a resource within AWS Config, including the resource type and resource ID.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct RemediationExceptionResourceKey {
+    /// The ID of the resource (for example, sg-xxxxxx).
+    #[serde(rename = "ResourceId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_id: Option<String>,
+    /// The type of a resource.
+    #[serde(rename = "ResourceType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_type: Option<String>,
+}
+
+///

Provides details of the current status of the invoked remediation action for that resource.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct RemediationExecutionStatus { + ///

Start time when the remediation was executed.

+ #[serde(rename = "InvocationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invocation_time: Option, + ///

The time when the remediation execution was last updated.

+ #[serde(rename = "LastUpdatedTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_updated_time: Option, + #[serde(rename = "ResourceKey")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_key: Option, + ///

ENUM of the values.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, + ///

Details of every step.

#[serde(rename = "StepDetails")] #[serde(skip_serializing_if = "Option::is_none")] pub step_details: Option>, @@ -2113,7 +2569,7 @@ pub struct RemediationExecutionStatus { ///

Name of the step from the SSM document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemediationExecutionStep { ///

An error message if the step was interrupted during execution.

#[serde(rename = "ErrorMessage")] @@ -2152,7 +2608,7 @@ pub struct RemediationParameterValue { ///

An object that contains the resource type and the number of resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceCount { ///

The number of resources.

#[serde(rename = "count")] @@ -2204,7 +2660,7 @@ pub struct ResourceFilters { ///

The details that identify a resource that is discovered by AWS Config, including the resource type, ID, and (if available) the custom resource name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceIdentifier { ///

The time that the resource was deleted.

#[serde(rename = "resourceDeletionTime")] @@ -2240,13 +2696,12 @@ pub struct ResourceKey { pub struct ResourceValue { ///

The value is a resource ID.

#[serde(rename = "Value")] - #[serde(skip_serializing_if = "Option::is_none")] - pub value: Option, + pub value: String, } ///

An object with the name of the retention configuration and the retention period in days. The object stores the configuration for data retention in AWS Config.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RetentionConfiguration { ///

The name of the retention configuration object.

#[serde(rename = "Name")] @@ -2293,7 +2748,7 @@ pub struct SelectResourceConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SelectResourceConfigResponse { ///

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

#[serde(rename = "NextToken")] @@ -2341,6 +2796,19 @@ pub struct SourceDetail { pub message_type: Option, } +///

AWS Systems Manager (SSM) specific remediation controls.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SsmControls {
+    /// The maximum percentage of remediation actions allowed to run in parallel on the non-compliant resources for that specific rule. You can specify a percentage, such as 10%. The default value is 10.
+    #[serde(rename = "ConcurrentExecutionRatePercentage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub concurrent_execution_rate_percentage: Option<i64>,
+    /// The percentage of errors that are allowed before SSM stops running automations on non-compliant resources for that specific rule. You can specify a percentage of errors, for example 10%. If you do not specify a percentage, the default is 50%. For example, if you set the ErrorPercentage to 40% for 10 non-compliant resources, then SSM stops running the automations when the fifth error is received.
+    #[serde(rename = "ErrorPercentage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error_percentage: Option<i64>,
+}
+
 ///

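A sketch of wiring these controls into the `ExecutionControls` wrapper defined earlier in this diff, mirroring the defaults described above (10% concurrency, stop at 50% errors):

```rust
use rusoto_config::{ExecutionControls, SsmControls};

fn example_ssm_controls() -> ExecutionControls {
    ExecutionControls {
        ssm_controls: Some(SsmControls {
            // Run remediation on at most 10% of non-compliant resources at once.
            concurrent_execution_rate_percentage: Some(10),
            // Stop the automation once half of the executions have failed.
            error_percentage: Some(50),
        }),
    }
}
```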
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StartConfigRulesEvaluationRequest { @@ -2352,7 +2820,7 @@ pub struct StartConfigRulesEvaluationRequest { ///

The output when you start the evaluation for the specified AWS Config rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartConfigRulesEvaluationResponse {} ///

The input for the StartConfigurationRecorder action.

@@ -2374,7 +2842,7 @@ pub struct StartRemediationExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartRemediationExecutionResponse { ///

For resources that have failed to start execution, the API returns a resource key object.

#[serde(rename = "FailedItems")] @@ -2391,8 +2859,20 @@ pub struct StartRemediationExecutionResponse { pub struct StaticValue { ///

A list of values. For example, the ARN of the assumed role.

#[serde(rename = "Values")] + pub values: Vec, +} + +///

A status filter object used to filter results based on a specific member account ID or status type for an organization config rule.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StatusDetailFilters { + ///

The 12-digit account ID of the member account within an organization.

+ #[serde(rename = "AccountId")] #[serde(skip_serializing_if = "Option::is_none")] - pub values: Option>, + pub account_id: Option, + ///

Indicates the deployment status of the config rule in the member account. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in the member account. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in the member account. The config rule status is deleted when the master account deletes OrganizationConfigRule and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

  • CREATE_SUCCESSFUL when config rule has been created in the member account.

  • CREATE_IN_PROGRESS when config rule is being created in the member account.

  • CREATE_FAILED when config rule creation has failed in the member account.

  • DELETE_FAILED when config rule deletion has failed in the member account.

  • DELETE_IN_PROGRESS when config rule is being deleted in the member account.

  • DELETE_SUCCESSFUL when config rule has been deleted in the member account.

  • UPDATE_SUCCESSFUL when config rule has been updated in the member account.

  • UPDATE_IN_PROGRESS when config rule is being updated in the member account.

  • UPDATE_FAILED when config rule update has failed in the member account.

+ #[serde(rename = "MemberAccountRuleStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub member_account_rule_status: Option, } ///

The input for the StopConfigurationRecorder action.

@@ -2552,7 +3032,7 @@ impl Error for DeleteAggregationAuthorizationError { pub enum DeleteConfigRuleError { ///

One or more AWS Config rules in the request are invalid. Verify that the rule names are correct and try again.

NoSuchConfigRule(String), - ///

The rule is currently being deleted or the rule is deleting your evaluation results. Try your request again later.

+ ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

ResourceInUse(String), } @@ -2708,7 +3188,7 @@ impl Error for DeleteDeliveryChannelError { pub enum DeleteEvaluationResultsError { ///

One or more AWS Config rules in the request are invalid. Verify that the rule names are correct and try again.

NoSuchConfigRule(String), - ///

The rule is currently being deleted or the rule is deleting your evaluation results. Try your request again later.

+ ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

ResourceInUse(String), } @@ -2746,6 +3226,59 @@ impl Error for DeleteEvaluationResultsError { } } } +/// Errors returned by DeleteOrganizationConfigRule +#[derive(Debug, PartialEq)] +pub enum DeleteOrganizationConfigRuleError { + ///

You specified one or more organization config rules that do not exist.

+ NoSuchOrganizationConfigRule(String), + ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

+ OrganizationAccessDenied(String), + ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

+ ResourceInUse(String), +} + +impl DeleteOrganizationConfigRuleError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "NoSuchOrganizationConfigRuleException" => { + return RusotoError::Service( + DeleteOrganizationConfigRuleError::NoSuchOrganizationConfigRule(err.msg), + ) + } + "OrganizationAccessDeniedException" => { + return RusotoError::Service( + DeleteOrganizationConfigRuleError::OrganizationAccessDenied(err.msg), + ) + } + "ResourceInUseException" => { + return RusotoError::Service(DeleteOrganizationConfigRuleError::ResourceInUse( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteOrganizationConfigRuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteOrganizationConfigRuleError { + fn description(&self) -> &str { + match *self { + DeleteOrganizationConfigRuleError::NoSuchOrganizationConfigRule(ref cause) => cause, + DeleteOrganizationConfigRuleError::OrganizationAccessDenied(ref cause) => cause, + DeleteOrganizationConfigRuleError::ResourceInUse(ref cause) => cause, + } + } +} /// Errors returned by DeletePendingAggregationRequest #[derive(Debug, PartialEq)] pub enum DeletePendingAggregationRequestError { @@ -2788,6 +3321,8 @@ impl Error for DeletePendingAggregationRequestError { pub enum DeleteRemediationConfigurationError { ///
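A sketch of consuming the error type that `from_response` produces above, distinguishing the retryable `ResourceInUse` case; `result` stands in for the outcome of a `delete_organization_config_rule` call through the client:

```rust
use rusoto_config::DeleteOrganizationConfigRuleError;
use rusoto_core::RusotoError;

fn handle_delete(result: Result<(), RusotoError<DeleteOrganizationConfigRuleError>>) {
    match result {
        Ok(()) => println!("organization config rule deleted"),
        // Rule creation or deletion is still in progress; back off and retry.
        Err(RusotoError::Service(DeleteOrganizationConfigRuleError::ResourceInUse(msg))) => {
            eprintln!("resource in use, retry later: {}", msg)
        }
        Err(RusotoError::Service(
            DeleteOrganizationConfigRuleError::NoSuchOrganizationConfigRule(msg),
        )) => eprintln!("no such rule: {}", msg),
        Err(other) => eprintln!("unexpected error: {}", other),
    }
}
```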

You specified an AWS Config rule without a remediation configuration.

NoSuchRemediationConfiguration(String), + ///

A remediation action is in progress. You can either cancel execution in AWS Systems Manager or wait and try again later.

+ RemediationInProgress(String), } impl DeleteRemediationConfigurationError { @@ -2803,6 +3338,11 @@ impl DeleteRemediationConfigurationError { ), ) } + "RemediationInProgressException" => { + return RusotoError::Service( + DeleteRemediationConfigurationError::RemediationInProgress(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2819,6 +3359,44 @@ impl Error for DeleteRemediationConfigurationError { fn description(&self) -> &str { match *self { DeleteRemediationConfigurationError::NoSuchRemediationConfiguration(ref cause) => cause, + DeleteRemediationConfigurationError::RemediationInProgress(ref cause) => cause, + } + } +} +/// Errors returned by DeleteRemediationExceptions +#[derive(Debug, PartialEq)] +pub enum DeleteRemediationExceptionsError { + ///

You tried to delete a remediation exception that does not exist.

+ NoSuchRemediation(String), +} + +impl DeleteRemediationExceptionsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "NoSuchRemediationExceptionException" => { + return RusotoError::Service( + DeleteRemediationExceptionsError::NoSuchRemediation(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteRemediationExceptionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteRemediationExceptionsError { + fn description(&self) -> &str { + match *self { + DeleteRemediationExceptionsError::NoSuchRemediation(ref cause) => cause, } } } @@ -3484,6 +4062,136 @@ impl Error for DescribeDeliveryChannelsError { } } } +/// Errors returned by DescribeOrganizationConfigRuleStatuses +#[derive(Debug, PartialEq)] +pub enum DescribeOrganizationConfigRuleStatusesError { + ///

The specified limit is outside the allowable range.

+ InvalidLimit(String), + ///

The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

+ InvalidNextToken(String), + ///

You specified one or more organization config rules that do not exist.

+ NoSuchOrganizationConfigRule(String), + ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

+ OrganizationAccessDenied(String), +} + +impl DescribeOrganizationConfigRuleStatusesError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidLimitException" => { + return RusotoError::Service( + DescribeOrganizationConfigRuleStatusesError::InvalidLimit(err.msg), + ) + } + "InvalidNextTokenException" => { + return RusotoError::Service( + DescribeOrganizationConfigRuleStatusesError::InvalidNextToken(err.msg), + ) + } + "NoSuchOrganizationConfigRuleException" => { + return RusotoError::Service( + DescribeOrganizationConfigRuleStatusesError::NoSuchOrganizationConfigRule( + err.msg, + ), + ) + } + "OrganizationAccessDeniedException" => { + return RusotoError::Service( + DescribeOrganizationConfigRuleStatusesError::OrganizationAccessDenied( + err.msg, + ), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeOrganizationConfigRuleStatusesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeOrganizationConfigRuleStatusesError { + fn description(&self) -> &str { + match *self { + DescribeOrganizationConfigRuleStatusesError::InvalidLimit(ref cause) => cause, + DescribeOrganizationConfigRuleStatusesError::InvalidNextToken(ref cause) => cause, + DescribeOrganizationConfigRuleStatusesError::NoSuchOrganizationConfigRule( + ref cause, + ) => cause, + DescribeOrganizationConfigRuleStatusesError::OrganizationAccessDenied(ref cause) => { + cause + } + } + } +} +/// Errors returned by DescribeOrganizationConfigRules +#[derive(Debug, PartialEq)] +pub enum DescribeOrganizationConfigRulesError { + ///

The specified limit is outside the allowable range.

+ InvalidLimit(String), + ///

The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

+ InvalidNextToken(String), + ///

You specified one or more organization config rules that do not exist.

+ NoSuchOrganizationConfigRule(String), + ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

+ OrganizationAccessDenied(String), +} + +impl DescribeOrganizationConfigRulesError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidLimitException" => { + return RusotoError::Service( + DescribeOrganizationConfigRulesError::InvalidLimit(err.msg), + ) + } + "InvalidNextTokenException" => { + return RusotoError::Service( + DescribeOrganizationConfigRulesError::InvalidNextToken(err.msg), + ) + } + "NoSuchOrganizationConfigRuleException" => { + return RusotoError::Service( + DescribeOrganizationConfigRulesError::NoSuchOrganizationConfigRule(err.msg), + ) + } + "OrganizationAccessDeniedException" => { + return RusotoError::Service( + DescribeOrganizationConfigRulesError::OrganizationAccessDenied(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeOrganizationConfigRulesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeOrganizationConfigRulesError { + fn description(&self) -> &str { + match *self { + DescribeOrganizationConfigRulesError::InvalidLimit(ref cause) => cause, + DescribeOrganizationConfigRulesError::InvalidNextToken(ref cause) => cause, + DescribeOrganizationConfigRulesError::NoSuchOrganizationConfigRule(ref cause) => cause, + DescribeOrganizationConfigRulesError::OrganizationAccessDenied(ref cause) => cause, + } + } +} /// Errors returned by DescribePendingAggregationRequests #[derive(Debug, PartialEq)] pub enum DescribePendingAggregationRequestsError { @@ -3564,9 +4272,56 @@ impl Error for DescribeRemediationConfigurationsError { match *self {} } } +/// Errors returned by DescribeRemediationExceptions +#[derive(Debug, PartialEq)] +pub enum DescribeRemediationExceptionsError { + ///

The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

+ InvalidNextToken(String), + ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

+ InvalidParameterValue(String), +} + +impl DescribeRemediationExceptionsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidNextTokenException" => { + return RusotoError::Service( + DescribeRemediationExceptionsError::InvalidNextToken(err.msg), + ) + } + "InvalidParameterValueException" => { + return RusotoError::Service( + DescribeRemediationExceptionsError::InvalidParameterValue(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeRemediationExceptionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeRemediationExceptionsError { + fn description(&self) -> &str { + match *self { + DescribeRemediationExceptionsError::InvalidNextToken(ref cause) => cause, + DescribeRemediationExceptionsError::InvalidParameterValue(ref cause) => cause, + } + } +} /// Errors returned by DescribeRemediationExecutionStatus #[derive(Debug, PartialEq)] pub enum DescribeRemediationExecutionStatusError { + ///

The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

+ InvalidNextToken(String), ///

You specified an AWS Config rule without a remediation configuration.

NoSuchRemediationConfiguration(String), } @@ -3577,6 +4332,11 @@ impl DescribeRemediationExecutionStatusError { ) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "InvalidNextTokenException" => { + return RusotoError::Service( + DescribeRemediationExecutionStatusError::InvalidNextToken(err.msg), + ) + } "NoSuchRemediationConfigurationException" => { return RusotoError::Service( DescribeRemediationExecutionStatusError::NoSuchRemediationConfiguration( @@ -3599,6 +4359,7 @@ impl fmt::Display for DescribeRemediationExecutionStatusError { impl Error for DescribeRemediationExecutionStatusError { fn description(&self) -> &str { match *self { + DescribeRemediationExecutionStatusError::InvalidNextToken(ref cause) => cause, DescribeRemediationExecutionStatusError::NoSuchRemediationConfiguration(ref cause) => { cause } @@ -4079,6 +4840,75 @@ impl Error for GetDiscoveredResourceCountsError { } } } +/// Errors returned by GetOrganizationConfigRuleDetailedStatus +#[derive(Debug, PartialEq)] +pub enum GetOrganizationConfigRuleDetailedStatusError { + ///

The specified limit is outside the allowable range.

+ InvalidLimit(String), + ///

The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

+ InvalidNextToken(String), + ///

You specified one or more organization config rules that do not exist.

+ NoSuchOrganizationConfigRule(String), + ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

+ OrganizationAccessDenied(String), +} + +impl GetOrganizationConfigRuleDetailedStatusError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidLimitException" => { + return RusotoError::Service( + GetOrganizationConfigRuleDetailedStatusError::InvalidLimit(err.msg), + ) + } + "InvalidNextTokenException" => { + return RusotoError::Service( + GetOrganizationConfigRuleDetailedStatusError::InvalidNextToken(err.msg), + ) + } + "NoSuchOrganizationConfigRuleException" => { + return RusotoError::Service( + GetOrganizationConfigRuleDetailedStatusError::NoSuchOrganizationConfigRule( + err.msg, + ), + ) + } + "OrganizationAccessDeniedException" => { + return RusotoError::Service( + GetOrganizationConfigRuleDetailedStatusError::OrganizationAccessDenied( + err.msg, + ), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetOrganizationConfigRuleDetailedStatusError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetOrganizationConfigRuleDetailedStatusError { + fn description(&self) -> &str { + match *self { + GetOrganizationConfigRuleDetailedStatusError::InvalidLimit(ref cause) => cause, + GetOrganizationConfigRuleDetailedStatusError::InvalidNextToken(ref cause) => cause, + GetOrganizationConfigRuleDetailedStatusError::NoSuchOrganizationConfigRule( + ref cause, + ) => cause, + GetOrganizationConfigRuleDetailedStatusError::OrganizationAccessDenied(ref cause) => { + cause + } + } + } +} /// Errors returned by GetResourceConfigHistory #[derive(Debug, PartialEq)] pub enum GetResourceConfigHistoryError { @@ -4343,7 +5173,7 @@ impl Error for PutAggregationAuthorizationError { /// Errors returned by PutConfigRule #[derive(Debug, PartialEq)] pub enum PutConfigRuleError { - ///

Indicates one of the following errors:

  • The rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • The AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

+ ///

Indicates one of the following errors:

  • For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

  • For OrganizationConfigRule, the organization config rule cannot be created because you do not have permission to call the IAM GetRole action or to create a service-linked role.

InsufficientPermissions(String), ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

InvalidParameterValue(String), @@ -4351,7 +5181,7 @@ pub enum PutConfigRuleError { MaxNumberOfConfigRulesExceeded(String), ///

There are no configuration recorders available to provide the role needed to describe your resources. Create a configuration recorder.

NoAvailableConfigurationRecorder(String), - ///

The rule is currently being deleted or the rule is deleting your evaluation results. Try your request again later.

+ ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

ResourceInUse(String), } @@ -4412,11 +5242,11 @@ pub enum PutConfigurationAggregatorError { InvalidRole(String), ///

For StartConfigRulesEvaluation API, this exception is thrown if an evaluation is in progress or if you call the StartConfigRulesEvaluation API more than once per minute.

For PutConfigurationAggregator API, this exception is thrown if the number of accounts and aggregators exceeds the limit.

LimitExceeded(String), - ///

Organization does is no longer available.

+ ///

Organization is no longer available.

NoAvailableOrganization(String), - ///

No permission to call the EnableAWSServiceAccess API.

+ ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

OrganizationAccessDenied(String), - ///

The configuration aggregator cannot be created because organization does not have all features enabled.

+ ///

AWS Config resource cannot be created because your organization does not have all features enabled.

OrganizationAllFeaturesNotEnabled(String), } @@ -4647,8 +5477,89 @@ impl PutEvaluationsError { "InvalidResultTokenException" => { return RusotoError::Service(PutEvaluationsError::InvalidResultToken(err.msg)) } - "NoSuchConfigRuleException" => { - return RusotoError::Service(PutEvaluationsError::NoSuchConfigRule(err.msg)) + "NoSuchConfigRuleException" => { + return RusotoError::Service(PutEvaluationsError::NoSuchConfigRule(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutEvaluationsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutEvaluationsError { + fn description(&self) -> &str { + match *self { + PutEvaluationsError::InvalidParameterValue(ref cause) => cause, + PutEvaluationsError::InvalidResultToken(ref cause) => cause, + PutEvaluationsError::NoSuchConfigRule(ref cause) => cause, + } + } +} +/// Errors returned by PutOrganizationConfigRule +#[derive(Debug, PartialEq)] +pub enum PutOrganizationConfigRuleError { + ///

Indicates one of the following errors:

  • For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

  • For OrganizationConfigRule, the organization config rule cannot be created because you do not have permission to call the IAM GetRole action or to create a service-linked role.

+ InsufficientPermissions(String), + ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

+ InvalidParameterValue(String), + ///

You have reached the limit of the number of organization config rules you can create.

+ MaxNumberOfOrganizationConfigRulesExceeded(String), + ///

Organization is no longer available.

+ NoAvailableOrganization(String), + ///

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization master account.

+ OrganizationAccessDenied(String), + ///

AWS Config resource cannot be created because your organization does not have all features enabled.

+ OrganizationAllFeaturesNotEnabled(String), + ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

+ ResourceInUse(String), +} + +impl PutOrganizationConfigRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InsufficientPermissionsException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::InsufficientPermissions(err.msg), + ) + } + "InvalidParameterValueException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::InvalidParameterValue(err.msg), + ) + } + "MaxNumberOfOrganizationConfigRulesExceededException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::MaxNumberOfOrganizationConfigRulesExceeded( + err.msg, + ), + ) + } + "NoAvailableOrganizationException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::NoAvailableOrganization(err.msg), + ) + } + "OrganizationAccessDeniedException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::OrganizationAccessDenied(err.msg), + ) + } + "OrganizationAllFeaturesNotEnabledException" => { + return RusotoError::Service( + PutOrganizationConfigRuleError::OrganizationAllFeaturesNotEnabled(err.msg), + ) + } + "ResourceInUseException" => { + return RusotoError::Service(PutOrganizationConfigRuleError::ResourceInUse( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -4657,24 +5568,30 @@ impl PutEvaluationsError { return RusotoError::Unknown(res); } } -impl fmt::Display for PutEvaluationsError { +impl fmt::Display for PutOrganizationConfigRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for PutEvaluationsError { +impl Error for PutOrganizationConfigRuleError { fn description(&self) -> &str { match *self { - PutEvaluationsError::InvalidParameterValue(ref cause) => cause, - PutEvaluationsError::InvalidResultToken(ref cause) => cause, - PutEvaluationsError::NoSuchConfigRule(ref cause) => cause, + PutOrganizationConfigRuleError::InsufficientPermissions(ref cause) => cause, + PutOrganizationConfigRuleError::InvalidParameterValue(ref cause) => cause, + PutOrganizationConfigRuleError::MaxNumberOfOrganizationConfigRulesExceeded( + ref cause, + ) => cause, + PutOrganizationConfigRuleError::NoAvailableOrganization(ref cause) => cause, + PutOrganizationConfigRuleError::OrganizationAccessDenied(ref cause) => cause, + PutOrganizationConfigRuleError::OrganizationAllFeaturesNotEnabled(ref cause) => cause, + PutOrganizationConfigRuleError::ResourceInUse(ref cause) => cause, } } } /// Errors returned by PutRemediationConfigurations #[derive(Debug, PartialEq)] pub enum PutRemediationConfigurationsError { - ///

Indicates one of the following errors:

  • The rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • The AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

+ ///

Indicates one of the following errors:

  • For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

  • For OrganizationConfigRule, the organization config rule cannot be created because you do not have permission to call the IAM GetRole action or to create a service-linked role.

InsufficientPermissions(String), ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

InvalidParameterValue(String), @@ -4716,6 +5633,41 @@ impl Error for PutRemediationConfigurationsError { } } } +/// Errors returned by PutRemediationExceptions +#[derive(Debug, PartialEq)] +pub enum PutRemediationExceptionsError { + ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

+ InvalidParameterValue(String), +} + +impl PutRemediationExceptionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidParameterValueException" => { + return RusotoError::Service( + PutRemediationExceptionsError::InvalidParameterValue(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutRemediationExceptionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutRemediationExceptionsError { + fn description(&self) -> &str { + match *self { + PutRemediationExceptionsError::InvalidParameterValue(ref cause) => cause, + } + } +} /// Errors returned by PutRetentionConfiguration #[derive(Debug, PartialEq)] pub enum PutRetentionConfigurationError { @@ -4821,7 +5773,7 @@ pub enum StartConfigRulesEvaluationError { LimitExceeded(String), ///

One or more AWS Config rules in the request are invalid. Verify that the rule names are correct and try again.

NoSuchConfigRule(String), - ///

The rule is currently being deleted or the rule is deleting your evaluation results. Try your request again later.

+ ///

You see this exception in the following cases:

  • For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.

  • For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.

  • For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

  • For PutOrganizationConfigRule, organization config rule deletion is in progress. Try your request again later.

  • For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.

ResourceInUse(String), } @@ -4921,8 +5873,10 @@ impl Error for StartConfigurationRecorderError { /// Errors returned by StartRemediationExecution #[derive(Debug, PartialEq)] pub enum StartRemediationExecutionError { - ///

Indicates one of the following errors:

  • The rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • The AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

+ ///

Indicates one of the following errors:

  • For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.

  • For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

  • For OrganizationConfigRule, the organization config rule cannot be created because you do not have permission to call the IAM GetRole action or to create a service-linked role.

InsufficientPermissions(String), + ///

One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

+ InvalidParameterValue(String), ///

You specified an AWS Config rule without a remediation configuration.

NoSuchRemediationConfiguration(String), } @@ -4936,6 +5890,11 @@ impl StartRemediationExecutionError { StartRemediationExecutionError::InsufficientPermissions(err.msg), ) } + "InvalidParameterValueException" => { + return RusotoError::Service( + StartRemediationExecutionError::InvalidParameterValue(err.msg), + ) + } "NoSuchRemediationConfigurationException" => { return RusotoError::Service( StartRemediationExecutionError::NoSuchRemediationConfiguration(err.msg), @@ -4957,6 +5916,7 @@ impl Error for StartRemediationExecutionError { fn description(&self) -> &str { match *self { StartRemediationExecutionError::InsufficientPermissions(ref cause) => cause, + StartRemediationExecutionError::InvalidParameterValue(ref cause) => cause, StartRemediationExecutionError::NoSuchRemediationConfiguration(ref cause) => cause, } } @@ -5118,6 +6078,12 @@ pub trait ConfigService { input: DeleteEvaluationResultsRequest, ) -> RusotoFuture; + ///

Deletes the specified organization config rule and all of its evaluation results from all member accounts in that organization. Only a master account can delete an organization config rule.

AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a rule while it is in this state.

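For orientation, a minimal caller-side sketch (not part of the generated code; it assumes the published `rusoto_config` crate name and that the request struct's only field is `organization_config_rule_name`, both unverified here):

```rust
// Sketch: delete an organization config rule from the master account.
use rusoto_config::{ConfigService, ConfigServiceClient, DeleteOrganizationConfigRuleRequest};
use rusoto_core::{Region, RusotoError};

fn delete_org_rule() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let request = DeleteOrganizationConfigRuleRequest {
        // Hypothetical field name, inferred from the request type's name.
        organization_config_rule_name: "my-org-rule".to_owned(),
    };
    // The rule stays in DELETE_IN_PROGRESS until deletion completes.
    match client.delete_organization_config_rule(request).sync() {
        Ok(()) => println!("deletion started"),
        Err(RusotoError::Service(e)) => eprintln!("service error: {}", e),
        Err(e) => eprintln!("request failed: {}", e),
    }
}
```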
+ fn delete_organization_config_rule( + &self, + input: DeleteOrganizationConfigRuleRequest, + ) -> RusotoFuture<(), DeleteOrganizationConfigRuleError>; + ///

Deletes pending authorization requests for a specified aggregator account in a specified region.

fn delete_pending_aggregation_request( &self, @@ -5130,6 +6096,12 @@ pub trait ConfigService { input: DeleteRemediationConfigurationRequest, ) -> RusotoFuture; + ///

Deletes one or more remediation exceptions specified in the resource keys.

+ fn delete_remediation_exceptions( + &self, + input: DeleteRemediationExceptionsRequest, + ) -> RusotoFuture; + ///

Deletes the retention configuration.

fn delete_retention_configuration( &self, @@ -5142,7 +6114,7 @@ pub trait ConfigService { input: DeliverConfigSnapshotRequest, ) -> RusotoFuture; - ///

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

+ ///

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

fn describe_aggregate_compliance_by_config_rules( &self, input: DescribeAggregateComplianceByConfigRulesRequest, @@ -5229,6 +6201,21 @@ pub trait ConfigService { input: DescribeDeliveryChannelsRequest, ) -> RusotoFuture; + ///

Provides organization config rule deployment status for an organization.

The status is not considered successful until the organization config rule is successfully deployed in all the member accounts, with the exception of excluded accounts.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they apply only when you request all the organization config rules.

Only a master account can call this API.

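A pagination sketch for the all-rules case follows; the `limit`/`next_token` field names and the response field `organization_config_rule_statuses` are assumptions based on the wording above, and the request type is assumed to derive `Default` like the other generated requests:

```rust
// Sketch: page through all organization config rule statuses.
use rusoto_config::{
    ConfigService, ConfigServiceClient, DescribeOrganizationConfigRuleStatusesRequest,
};
use rusoto_core::Region;

fn list_all_statuses() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeOrganizationConfigRuleStatusesRequest {
            limit: Some(50),                // assumed field name
            next_token: next_token.clone(), // assumed field name
            ..Default::default()
        };
        let page = client
            .describe_organization_config_rule_statuses(request)
            .sync()
            .expect("describe call failed");
        for status in page.organization_config_rule_statuses.unwrap_or_default() {
            println!("{:?}", status);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```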
+ fn describe_organization_config_rule_statuses( + &self, + input: DescribeOrganizationConfigRuleStatusesRequest, + ) -> RusotoFuture< + DescribeOrganizationConfigRuleStatusesResponse, + DescribeOrganizationConfigRuleStatusesError, + >; + + ///

Returns a list of organization config rules.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they apply only when you request all the organization config rules.

Only a master account can call this API.

+ fn describe_organization_config_rules( + &self, + input: DescribeOrganizationConfigRulesRequest, + ) -> RusotoFuture; + ///

Returns a list of all pending aggregation requests.

fn describe_pending_aggregation_requests( &self, @@ -5247,6 +6234,12 @@ pub trait ConfigService { DescribeRemediationConfigurationsError, >; + ///

Returns the details of one or more remediation exceptions: a detailed view of a remediation exception for a set of resources, including an explanation of the exception and the time when the exception will be deleted.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you request resources in batch; they apply only when you request all resources.

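As a sketch of the all-resources (non-batch) case — `config_rule_name` and `remediation_exceptions` are assumed field names, and omitting resource keys is assumed to mean "request all":

```rust
// Sketch: list remediation exceptions for one rule.
use rusoto_config::{ConfigService, ConfigServiceClient, DescribeRemediationExceptionsRequest};
use rusoto_core::Region;

fn show_exceptions() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let request = DescribeRemediationExceptionsRequest {
        config_rule_name: "required-tags".to_owned(), // hypothetical rule name
        ..Default::default() // no resource keys: paginated, all resources
    };
    match client.describe_remediation_exceptions(request).sync() {
        Ok(page) => println!("{:?}", page.remediation_exceptions),
        Err(e) => eprintln!("describe failed: {}", e),
    }
}
```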
+ fn describe_remediation_exceptions( + &self, + input: DescribeRemediationExceptionsRequest, + ) -> RusotoFuture; + ///

Provides a detailed view of a remediation execution for a set of resources, including state, timestamps for when steps for the remediation execution occur, and any error messages for steps that have failed. When you specify the limit and the next token, you receive a paginated response.

fn describe_remediation_execution_status( &self, @@ -5262,7 +6255,7 @@ pub trait ConfigService { input: DescribeRetentionConfigurationsRequest, ) -> RusotoFuture; - ///

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page. But if you have a nextToken, the results are displayed on the next page.

+ ///

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page. But if you have a nextToken, the results are displayed on the next page.

fn get_aggregate_compliance_details_by_config_rule( &self, input: GetAggregateComplianceDetailsByConfigRuleRequest, @@ -5327,6 +6320,15 @@ pub trait ConfigService { input: GetDiscoveredResourceCountsRequest, ) -> RusotoFuture; + ///

Returns detailed status for each member account within an organization for a given organization config rule.

Only a master account can call this API.

+ fn get_organization_config_rule_detailed_status( + &self, + input: GetOrganizationConfigRuleDetailedStatusRequest, + ) -> RusotoFuture< + GetOrganizationConfigRuleDetailedStatusResponse, + GetOrganizationConfigRuleDetailedStatusError, + >; + ///

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

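A history-walking sketch, following the nextToken contract described above (field names are assumptions; the resource ID is a placeholder):

```rust
// Sketch: walk a resource's configuration history page by page.
use rusoto_config::{ConfigService, ConfigServiceClient, GetResourceConfigHistoryRequest};
use rusoto_core::Region;

fn resource_history() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = GetResourceConfigHistoryRequest {
            resource_type: "AWS::EC2::Instance".to_owned(),
            resource_id: "i-0123456789abcdef0".to_owned(), // placeholder
            limit: Some(10), // the default page size noted above
            next_token: next_token.clone(),
            ..Default::default()
        };
        let page = client
            .get_resource_config_history(request)
            .sync()
            .expect("history call failed");
        for item in page.configuration_items.unwrap_or_default() {
            println!("{:?}", item);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```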
fn get_resource_config_history( &self, @@ -5384,12 +6386,24 @@ pub trait ConfigService { input: PutEvaluationsRequest, ) -> RusotoFuture; + ///

Adds or updates an organization config rule for your entire organization, evaluating whether your AWS resources comply with your desired configurations. Only a master account can create or update an organization config rule.

This API enables organization service access through the EnableAWSServiceAccess action and creates a service-linked role, AWSServiceRoleForConfigMultiAccountSetup, in the master account of your organization. The service-linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of the role with the GetRole action.

You can use this action to create both custom AWS Config rules and AWS managed Config rules. If you are adding a new custom AWS Config rule, you must first create the AWS Lambda function in the master account that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed Config rule, specify the rule's identifier for the RuleIdentifier key.

The maximum number of organization config rules that AWS Config supports is 150.

Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata.

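A managed-rule sketch (the `OrganizationManagedRuleMetadata` field names and the response's `organization_config_rule_arn` are assumptions; only the type names appear in this diff):

```rust
// Sketch: deploy an AWS managed rule organization-wide via its identifier.
use rusoto_config::{
    ConfigService, ConfigServiceClient, OrganizationManagedRuleMetadata,
    PutOrganizationConfigRuleRequest,
};
use rusoto_core::Region;

fn put_org_rule() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let request = PutOrganizationConfigRuleRequest {
        organization_config_rule_name: "org-s3-public-read".to_owned(),
        // Managed rule: pass the rule's identifier for RuleIdentifier.
        // A custom rule would instead set OrganizationCustomRuleMetadata
        // with the Lambda function's ARN.
        organization_managed_rule_metadata: Some(OrganizationManagedRuleMetadata {
            rule_identifier: "S3_BUCKET_PUBLIC_READ_PROHIBITED".to_owned(),
            ..Default::default()
        }),
        ..Default::default()
    };
    match client.put_organization_config_rule(request).sync() {
        Ok(resp) => println!("rule ARN: {:?}", resp.organization_config_rule_arn),
        Err(e) => eprintln!("put failed: {}", e),
    }
}
```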
+ fn put_organization_config_rule( + &self, + input: PutOrganizationConfigRuleRequest, + ) -> RusotoFuture; + ///

Adds or updates the remediation configuration for a specific AWS Config rule with the selected target or action. The API creates the RemediationConfiguration object for the AWS Config rule. The AWS Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist, and you must have permission to use the target.

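A remediation-configuration sketch; the `RemediationConfiguration` field names (`config_rule_name`, `target_type`, `target_id`) and the `SSM_DOCUMENT` target-type string are assumptions consistent with the SSM-document wording above:

```rust
// Sketch: attach an SSM-document remediation to an existing rule.
use rusoto_config::{
    ConfigService, ConfigServiceClient, PutRemediationConfigurationsRequest,
    RemediationConfiguration,
};
use rusoto_core::Region;

fn put_remediation() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let remediation = RemediationConfiguration {
        config_rule_name: "required-tags".to_owned(), // rule must already exist
        target_type: "SSM_DOCUMENT".to_owned(),       // assumed target-type string
        target_id: "AWS-PublishSNSNotification".to_owned(), // hypothetical document
        ..Default::default()
    };
    let request = PutRemediationConfigurationsRequest {
        remediation_configurations: vec![remediation], // assumed field name
    };
    match client.put_remediation_configurations(request).sync() {
        Ok(resp) => println!("{:?}", resp),
        Err(e) => eprintln!("put failed: {}", e),
    }
}
```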
fn put_remediation_configurations( &self, input: PutRemediationConfigurationsRequest, ) -> RusotoFuture; + ///

A remediation exception means that a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.

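A sketch of adding one exception — the `RemediationExceptionResourceKey` shape and all field names here are assumptions:

```rust
// Sketch: exempt a single resource from auto-remediation under a rule.
use rusoto_config::{
    ConfigService, ConfigServiceClient, PutRemediationExceptionsRequest,
    RemediationExceptionResourceKey,
};
use rusoto_core::Region;

fn put_exception() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let request = PutRemediationExceptionsRequest {
        config_rule_name: "required-tags".to_owned(),
        resource_keys: vec![RemediationExceptionResourceKey {
            resource_type: Some("AWS::S3::Bucket".to_owned()),
            resource_id: Some("my-legacy-bucket".to_owned()), // placeholder
            ..Default::default()
        }],
        message: Some("tagged through a separate process".to_owned()),
        ..Default::default()
    };
    match client.put_remediation_exceptions(request).sync() {
        Ok(resp) => println!("{:?}", resp),
        Err(e) => eprintln!("put failed: {}", e),
    }
}
```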
+ fn put_remediation_exceptions( + &self, + input: PutRemediationExceptionsRequest, + ) -> RusotoFuture; + ///

Creates and updates the retention configuration with details about the retention period (number of days) for which AWS Config stores your historical information. The API creates the RetentionConfiguration object and names the object as default. When you have a RetentionConfiguration object named default, calling the API modifies the default object.

Currently, AWS Config supports only one retention configuration per region in your account.

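A retention sketch; `retention_period_in_days` is an assumed field name derived from the "number of days" wording:

```rust
// Sketch: set the single per-region retention configuration.
use rusoto_config::{ConfigService, ConfigServiceClient, PutRetentionConfigurationRequest};
use rusoto_core::Region;

fn set_retention() {
    let client = ConfigServiceClient::new(Region::UsEast1);
    let request = PutRetentionConfigurationRequest {
        retention_period_in_days: 365, // assumed field; 30..=2557 days
    };
    // Re-running this call updates the object named "default".
    match client.put_retention_configuration(request).sync() {
        Ok(resp) => println!("{:?}", resp),
        Err(e) => eprintln!("put failed: {}", e),
    }
}
```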
fn put_retention_configuration( &self, @@ -5444,10 +6458,7 @@ impl ConfigServiceClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ConfigServiceClient { - ConfigServiceClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5461,10 +6472,14 @@ impl ConfigServiceClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ConfigServiceClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ConfigServiceClient { + ConfigServiceClient { client, region } } } @@ -5687,6 +6702,32 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Deletes the specified organization config rule and all of its evaluation results from all member accounts in that organization. Only a master account can delete an organization config rule.

AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a rule while it is in this state.

+ fn delete_organization_config_rule( + &self, + input: DeleteOrganizationConfigRuleRequest, + ) -> RusotoFuture<(), DeleteOrganizationConfigRuleError> { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.DeleteOrganizationConfigRule", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(future::ok(::std::mem::drop(response))) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DeleteOrganizationConfigRuleError::from_response(response)) + })) + } + }) + } + ///

Deletes pending authorization requests for a specified aggregator account in a specified region.

fn delete_pending_aggregation_request( &self, @@ -5745,6 +6786,35 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Deletes one or more remediation exceptions specified in the resource keys.

+ fn delete_remediation_exceptions( + &self, + input: DeleteRemediationExceptionsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.DeleteRemediationExceptions", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DeleteRemediationExceptionsError::from_response(response)) + })) + } + }) + } + ///

Deletes the retention configuration.

fn delete_retention_configuration( &self, @@ -5799,7 +6869,7 @@ impl ConfigService for ConfigServiceClient { }) } - ///

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

+ ///

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

fn describe_aggregate_compliance_by_config_rules( &self, input: DescribeAggregateComplianceByConfigRulesRequest, @@ -6172,6 +7242,72 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Provides organization config rule deployment status for an organization.

The status is not considered successful until the organization config rule is successfully deployed in all the member accounts, with the exception of excluded accounts.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they apply only when you request all the organization config rules.

Only a master account can call this API.

+ fn describe_organization_config_rule_statuses( + &self, + input: DescribeOrganizationConfigRuleStatusesRequest, + ) -> RusotoFuture< + DescribeOrganizationConfigRuleStatusesResponse, + DescribeOrganizationConfigRuleStatusesError, + > { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.DescribeOrganizationConfigRuleStatuses", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeOrganizationConfigRuleStatusesError::from_response( + response, + )) + })) + } + }) + } + + ///

Returns a list of organization config rules.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they apply only when you request all the organization config rules.

Only a master account can call this API.

+ fn describe_organization_config_rules( + &self, + input: DescribeOrganizationConfigRulesRequest, + ) -> RusotoFuture + { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.DescribeOrganizationConfigRules", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeOrganizationConfigRulesError::from_response( + response, + )) + })) + } + }) + } + ///

Returns a list of all pending aggregation requests.

fn describe_pending_aggregation_requests( &self, @@ -6240,6 +7376,36 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Returns the details of one or more remediation exceptions: a detailed view of a remediation exception for a set of resources, including an explanation of the exception and the time when the exception will be deleted.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you request resources in batch; they apply only when you request all resources.

+ fn describe_remediation_exceptions( + &self, + input: DescribeRemediationExceptionsRequest, + ) -> RusotoFuture + { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.DescribeRemediationExceptions", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeRemediationExceptionsError::from_response(response)) + })) + } + }) + } + ///

Provides a detailed view of a remediation execution for a set of resources, including state, timestamps for when steps for the remediation execution occur, and any error messages for steps that have failed. When you specify the limit and the next token, you receive a paginated response.

fn describe_remediation_execution_status( &self, @@ -6306,7 +7472,7 @@ impl ConfigService for ConfigServiceClient { }) } - ///

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page. But if you have a nextToken, the results are displayed on the next page.

+ ///

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page. But if you have a nextToken, the results are displayed on the next page.

fn get_aggregate_compliance_details_by_config_rule( &self, input: GetAggregateComplianceDetailsByConfigRuleRequest, @@ -6590,6 +7756,40 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Returns detailed status for each member account within an organization for a given organization config rule.

Only a master account can call this API.

+ fn get_organization_config_rule_detailed_status( + &self, + input: GetOrganizationConfigRuleDetailedStatusRequest, + ) -> RusotoFuture< + GetOrganizationConfigRuleDetailedStatusResponse, + GetOrganizationConfigRuleDetailedStatusError, + > { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.GetOrganizationConfigRuleDetailedStatus", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(GetOrganizationConfigRuleDetailedStatusError::from_response( + response, + )) + })) + } + }) + } + ///

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

fn get_resource_config_history( &self, @@ -6870,6 +8070,35 @@ impl ConfigService for ConfigServiceClient { }) } + ///

Adds or updates an organization config rule for your entire organization, evaluating whether your AWS resources comply with your desired configurations. Only a master account can create or update an organization config rule.

This API enables organization service access through the EnableAWSServiceAccess action and creates a service-linked role, AWSServiceRoleForConfigMultiAccountSetup, in the master account of your organization. The service-linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of the role with the GetRole action.

You can use this action to create both custom AWS Config rules and AWS managed Config rules. If you are adding a new custom AWS Config rule, you must first create the AWS Lambda function in the master account that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed Config rule, specify the rule's identifier for the RuleIdentifier key.

The maximum number of organization config rules that AWS Config supports is 150.

Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata.

+ fn put_organization_config_rule( + &self, + input: PutOrganizationConfigRuleRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.PutOrganizationConfigRule", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(PutOrganizationConfigRuleError::from_response(response)) + })) + } + }) + } + ///

Adds or updates the remediation configuration for a specific AWS Config rule with the selected target or action. The API creates the RemediationConfiguration object for the AWS Config rule. The AWS Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist, and you must have permission to use the target.

fn put_remediation_configurations( &self, @@ -6899,6 +8128,35 @@ impl ConfigService for ConfigServiceClient { }) } + ///

A remediation exception means that a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.

+ fn put_remediation_exceptions( + &self, + input: PutRemediationExceptionsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "config", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "StarlingDoveService.PutRemediationExceptions", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(PutRemediationExceptionsError::from_response(response)) + })) + } + }) + } + ///

Creates and updates the retention configuration with details about the retention period (number of days) for which AWS Config stores your historical information. The API creates the RetentionConfiguration object and names the object as default. When you have a RetentionConfiguration object named default, calling the API modifies the default object.

Currently, AWS Config supports only one retention configuration per region in your account.

fn put_retention_configuration( &self, diff --git a/rusoto/services/connect/Cargo.toml b/rusoto/services/connect/Cargo.toml index 400d8658dfe..a5e75d2b832 100644 --- a/rusoto/services/connect/Cargo.toml +++ b/rusoto/services/connect/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_connect" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/connect/README.md b/rusoto/services/connect/README.md index aef6def4659..7edc12630e3 100644 --- a/rusoto/services/connect/README.md +++ b/rusoto/services/connect/README.md @@ -23,9 +23,16 @@ To use `rusoto_connect` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_connect = "0.40.0" +rusoto_connect = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/connect/src/custom/mod.rs b/rusoto/services/connect/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/connect/src/custom/mod.rs +++ b/rusoto/services/connect/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/connect/src/generated.rs b/rusoto/services/connect/src/generated.rs index 05c36e5d128..2fcaccd57da 100644 --- a/rusoto/services/connect/src/generated.rs +++ b/rusoto/services/connect/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -61,7 +60,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResponse { ///

The Amazon Resource Name (ARN) of the user account created.

#[serde(rename = "UserArn")] @@ -75,7 +74,7 @@ pub struct CreateUserResponse { ///

The credentials to use for federation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Credentials { ///

An access token generated for a federated user to access Amazon Connect.

#[serde(rename = "AccessToken")] @@ -110,7 +109,7 @@ pub struct CurrentMetric { ///

A CurrentMetricData object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CurrentMetricData { ///

The metric in a CurrentMetricData object.

#[serde(rename = "Metric")] @@ -124,7 +123,7 @@ pub struct CurrentMetricData { ///

A CurrentMetricResult object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CurrentMetricResult { ///

The Collections for the CurrentMetricResult object.

#[serde(rename = "Collections")] @@ -157,7 +156,7 @@ pub struct DescribeUserHierarchyGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserHierarchyGroupResponse { ///

Returns a HierarchyGroup object.

#[serde(rename = "HierarchyGroup")] @@ -173,7 +172,7 @@ pub struct DescribeUserHierarchyStructureRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserHierarchyStructureResponse { ///

A HierarchyStructure object.

#[serde(rename = "HierarchyStructure")] @@ -192,7 +191,7 @@ pub struct DescribeUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserResponse { ///

A User object that contains information about the user account and configuration settings.

#[serde(rename = "User")] @@ -202,7 +201,7 @@ pub struct DescribeUserResponse { ///

A Dimensions object that includes the Channel and Queue for the metric.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Dimensions { ///

The channel used for grouping and filters. Only VOICE is supported.

#[serde(rename = "Channel")] @@ -238,7 +237,7 @@ pub struct GetContactAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetContactAttributesResponse { ///

The attributes to update.

#[serde(rename = "Attributes")] @@ -272,7 +271,7 @@ pub struct GetCurrentMetricDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCurrentMetricDataResponse { ///

The time at which CurrentMetricData was retrieved and cached for pagination.

#[serde(rename = "DataSnapshotTime")] @@ -296,7 +295,7 @@ pub struct GetFederationTokenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFederationTokenResponse { ///

The credentials to use for federation.

#[serde(rename = "Credentials")] @@ -336,7 +335,7 @@ pub struct GetMetricDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMetricDataResponse { ///

A list of HistoricalMetricResult objects, organized by Dimensions, which is the ID of the resource specified in the Filters used for the request. The metrics are combined with the metrics included in Collections, which is a list of HistoricalMetricData objects.

If no Grouping is specified in the request, Collections includes summary data for the HistoricalMetrics.

#[serde(rename = "MetricResults")] @@ -350,7 +349,7 @@ pub struct GetMetricDataResponse { ///

A HierarchyGroup object that contains information about a hierarchy group in your Amazon Connect instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HierarchyGroup { ///

The Amazon Resource Name (ARN) for the hierarchy group.

#[serde(rename = "Arn")] @@ -376,7 +375,7 @@ pub struct HierarchyGroup { ///

A HierarchyGroupSummary object that contains information about the hierarchy group, including ARN, Id, and Name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HierarchyGroupSummary { ///

The ARN for the hierarchy group.

#[serde(rename = "Arn")] @@ -394,7 +393,7 @@ pub struct HierarchyGroupSummary { ///

A HierarchyLevel object that contains information about the levels in a hierarchy group, including ARN, Id, and Name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HierarchyLevel { ///

The ARN for the hierarchy group level.

#[serde(rename = "Arn")] @@ -412,7 +411,7 @@ pub struct HierarchyLevel { ///

A HierarchyPath object that contains information about the levels of the hierarchy group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HierarchyPath { ///

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

#[serde(rename = "LevelFive")] @@ -438,7 +437,7 @@ pub struct HierarchyPath { ///

A HierarchyStructure object that contains information about the hierarchy group structure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HierarchyStructure { ///

A HierarchyLevel object that contains information about the hierarchy group level.

#[serde(rename = "LevelFive")] @@ -485,7 +484,7 @@ pub struct HistoricalMetric { ///

A HistoricalMetricData object that contains a Metric and a Value.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HistoricalMetricData { ///

A HistoricalMetric object.

#[serde(rename = "Metric")] @@ -499,7 +498,7 @@ pub struct HistoricalMetricData { ///

The metrics data returned from a GetMetricData operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HistoricalMetricResult { ///

A list of HistoricalMetricData objects.

#[serde(rename = "Collections")] @@ -527,7 +526,7 @@ pub struct ListRoutingProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRoutingProfilesResponse { ///

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

#[serde(rename = "NextToken")] @@ -555,7 +554,7 @@ pub struct ListSecurityProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecurityProfilesResponse { ///

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

#[serde(rename = "NextToken")] @@ -583,7 +582,7 @@ pub struct ListUserHierarchyGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUserHierarchyGroupsResponse { ///

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

#[serde(rename = "NextToken")] @@ -611,7 +610,7 @@ pub struct ListUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersResponse { ///

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

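Since several Connect list operations share this NextToken contract, a pagination sketch may help; the field names (`instance_id`, `max_results`, `next_token`, `user_summary_list`) are assumptions, and the instance ID is a placeholder:

```rust
// Sketch: page through Amazon Connect users by feeding NextToken back in.
use rusoto_connect::{Connect, ConnectClient, ListUsersRequest};
use rusoto_core::Region;

fn list_all_users() {
    let client = ConnectClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListUsersRequest {
            instance_id: "12345678-aaaa-bbbb-cccc-123456789012".to_owned(), // placeholder
            max_results: Some(100),
            next_token: next_token.clone(),
        };
        let page = client.list_users(request).sync().expect("list_users failed");
        for user in page.user_summary_list.unwrap_or_default() {
            println!("{:?}", user.username);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```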
#[serde(rename = "NextToken")] @@ -625,7 +624,7 @@ pub struct ListUsersResponse { ///

A QueueReference object that contains the QueueId and ARN for the queue resource for which metrics are returned.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueueReference { ///

The Amazon Resource Name (ARN) of the queue.

#[serde(rename = "Arn")] @@ -639,7 +638,7 @@ pub struct QueueReference { ///

A RoutingProfileSummary object that contains information about a routing profile, including ARN, Id, and Name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RoutingProfileSummary { ///

The ARN of the routing profile.

#[serde(rename = "Arn")] @@ -657,7 +656,7 @@ pub struct RoutingProfileSummary { ///

A SecurityProfileSummary object that contains information about a security profile, including ARN, Id, and Name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityProfileSummary { ///

The ARN of the security profile.

#[serde(rename = "Arn")] @@ -703,7 +702,7 @@ pub struct StartOutboundVoiceContactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartOutboundVoiceContactResponse { ///

The unique identifier of this contact within your Amazon Connect instance.

#[serde(rename = "ContactId")] @@ -722,7 +721,7 @@ pub struct StopContactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopContactResponse {} ///

A Threshold object that includes a comparison and ThresholdValue to compare to. Used with service level metrics.

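The change repeated throughout these hunks swaps `#[cfg_attr(test, derive(Serialize))]` for `#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]`, so consumers can opt into serializable output structs via the new `serialize_structs` feature instead of only getting `Serialize` in test builds. A minimal sketch of what the attribute does, using a hypothetical `ExampleResponse` (not a real rusoto type) and assuming `serde` with its `derive` feature plus `serde_json`:

```rust
use serde::Deserialize;

// `Serialize` is derived only for `cfg(test)` builds or when the consumer
// enables the crate's `serialize_structs` feature; plain builds get just
// `Deserialize`, keeping the default compile output unchanged.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(serde::Serialize))]
pub struct ExampleResponse {
    pub next_token: Option<String>,
}

// Compiles only when `Serialize` is actually derived.
#[cfg(feature = "serialize_structs")]
pub fn to_json(resp: &ExampleResponse) -> serde_json::Result<String> {
    serde_json::to_string(resp)
}
```
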
@@ -752,7 +751,7 @@ pub struct UpdateContactAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateContactAttributesResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -823,7 +822,7 @@ pub struct UpdateUserSecurityProfilesRequest { ///

A User object that contains information about a user account in your Amazon Connect instance, including configuration settings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct User { ///

The ARN of the user account.

#[serde(rename = "Arn")] @@ -902,7 +901,7 @@ pub struct UserPhoneConfig { ///

A UserSummary object that contains information about a user, including ARN, Id, and user name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserSummary { ///

The ARN for the user account.

#[serde(rename = "Arn")] @@ -2380,10 +2379,7 @@ impl ConnectClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ConnectClient { - ConnectClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2397,10 +2393,14 @@ impl ConnectClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ConnectClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ConnectClient { + ConnectClient { client, region } } } diff --git a/rusoto/services/cur/Cargo.toml b/rusoto/services/cur/Cargo.toml index b87b1281223..c25109235a9 100644 --- a/rusoto/services/cur/Cargo.toml +++ b/rusoto/services/cur/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_cur" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/cur/README.md b/rusoto/services/cur/README.md index ec8bd739655..b29db8fddf3 100644 --- a/rusoto/services/cur/README.md +++ b/rusoto/services/cur/README.md @@ -23,9 +23,16 @@ To use `rusoto_cur` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_cur = "0.40.0" +rusoto_cur = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/cur/src/custom/mod.rs b/rusoto/services/cur/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/cur/src/custom/mod.rs +++ b/rusoto/services/cur/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/cur/src/generated.rs b/rusoto/services/cur/src/generated.rs index 944982920da..676a3ee77a4 100644 --- a/rusoto/services/cur/src/generated.rs +++ b/rusoto/services/cur/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -34,7 +33,7 @@ pub struct DeleteReportDefinitionRequest { ///

If the action is successful, the service sends back an HTTP 200 response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReportDefinitionResponse { #[serde(rename = "ResponseMessage")] #[serde(skip_serializing_if = "Option::is_none")] @@ -54,7 +53,7 @@ pub struct DescribeReportDefinitionsRequest { ///

If the action is successful, the service sends back an HTTP 200 response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReportDefinitionsResponse { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -65,6 +64,18 @@ pub struct DescribeReportDefinitionsResponse { pub report_definitions: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ModifyReportDefinitionRequest { + #[serde(rename = "ReportDefinition")] + pub report_definition: ReportDefinition, + #[serde(rename = "ReportName")] + pub report_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ModifyReportDefinitionResponse {} + ///

Creates a Cost and Usage Report.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutReportDefinitionRequest { @@ -75,7 +86,7 @@ pub struct PutReportDefinitionRequest { ///

If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutReportDefinitionResponse {} ///

The definition of AWS Cost and Usage Report. You can specify the report name, time unit, report format, compression format, S3 bucket, additional artifacts, and schema elements in the definition.

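Alongside the derive change, this diff adds a whole new `ModifyReportDefinition` operation to `rusoto_cur`: the request/response structs above, plus the error type and trait method below. A hedged usage sketch against the 0.41.0 surface shown here; the report name is illustrative, and a real call would populate every `ReportDefinition` field rather than relying on `Default`:

```rust
use rusoto_core::Region;
use rusoto_cur::{CostAndUsageReport, CostAndUsageReportClient, ModifyReportDefinitionRequest};

fn main() {
    let client = CostAndUsageReportClient::new(Region::UsEast1);
    let request = ModifyReportDefinitionRequest {
        report_name: "cost-report".to_string(), // illustrative name
        // Assumes ReportDefinition derives Default like the other generated
        // shapes; fill in the real required fields for an actual call.
        report_definition: Default::default(),
    };
    // RusotoFuture exposes a blocking `.sync()` in this rusoto generation.
    match client.modify_report_definition(request).sync() {
        Ok(_) => println!("report definition updated"),
        Err(err) => eprintln!("ModifyReportDefinition failed: {}", err),
    }
}
```
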
@@ -182,6 +193,41 @@ impl Error for DescribeReportDefinitionsError { } } } +/// Errors returned by ModifyReportDefinition +#[derive(Debug, PartialEq)] +pub enum ModifyReportDefinitionError { + ///

An error on the server occurred during the processing of your request. Try again later.

+ InternalError(String), +} + +impl ModifyReportDefinitionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalErrorException" => { + return RusotoError::Service(ModifyReportDefinitionError::InternalError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ModifyReportDefinitionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyReportDefinitionError { + fn description(&self) -> &str { + match *self { + ModifyReportDefinitionError::InternalError(ref cause) => cause, + } + } +} /// Errors returned by PutReportDefinition #[derive(Debug, PartialEq)] pub enum PutReportDefinitionError { @@ -245,6 +291,12 @@ pub trait CostAndUsageReport { input: DescribeReportDefinitionsRequest, ) -> RusotoFuture; + ///

Allows you to programmatically update your report preferences.

+ fn modify_report_definition( + &self, + input: ModifyReportDefinitionRequest, + ) -> RusotoFuture; + ///

Creates a new report using the description that you provide.

fn put_report_definition( &self, @@ -263,10 +315,7 @@ impl CostAndUsageReportClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CostAndUsageReportClient { - CostAndUsageReportClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -280,10 +329,14 @@ impl CostAndUsageReportClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CostAndUsageReportClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CostAndUsageReportClient { + CostAndUsageReportClient { client, region } } } @@ -348,6 +401,37 @@ impl CostAndUsageReport for CostAndUsageReportClient { }) } + ///

Allows you to programmatically update your report preferences.

+ fn modify_report_definition( + &self, + input: ModifyReportDefinitionRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "cur", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWSOrigamiServiceGatewayService.ModifyReportDefinition", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ModifyReportDefinitionError::from_response(response)) + }), + ) + } + }) + } + ///

Creates a new report using the description that you provide.

fn put_report_definition( &self, diff --git a/rusoto/services/datapipeline/Cargo.toml b/rusoto/services/datapipeline/Cargo.toml index 706df2d4eed..30690818f06 100644 --- a/rusoto/services/datapipeline/Cargo.toml +++ b/rusoto/services/datapipeline/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_datapipeline" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/datapipeline/README.md b/rusoto/services/datapipeline/README.md index 065956169e3..9cdaa55af53 100644 --- a/rusoto/services/datapipeline/README.md +++ b/rusoto/services/datapipeline/README.md @@ -23,9 +23,16 @@ To use `rusoto_datapipeline` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_datapipeline = "0.40.0" +rusoto_datapipeline = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/datapipeline/src/custom/mod.rs b/rusoto/services/datapipeline/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/datapipeline/src/custom/mod.rs +++ b/rusoto/services/datapipeline/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/datapipeline/src/generated.rs b/rusoto/services/datapipeline/src/generated.rs index 452aeb5a1f6..2050cd7ee58 100644 --- a/rusoto/services/datapipeline/src/generated.rs +++ b/rusoto/services/datapipeline/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -42,7 +41,7 @@ pub struct ActivatePipelineInput { ///

Contains the output of ActivatePipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivatePipelineOutput {} ///

Contains the parameters for AddTags.

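The client-construction hunks for `ConnectClient` and `CostAndUsageReportClient` above apply a refactor that recurs for every generated client in this diff (including `DataPipelineClient` and `DynamodbAcceleratorClient` below): `new` and `new_with` now delegate to a single `new_with_client`, which doubles as a public hook for injecting a preconfigured `Client`. A condensed sketch of the pattern, with `FooClient` as a stand-in:

```rust
use rusoto_core::{region, Client};

pub struct FooClient {
    client: Client,
    region: region::Region,
}

impl FooClient {
    /// Reuses the process-wide shared `Client`, as the refactored `new` does.
    pub fn new(region: region::Region) -> FooClient {
        Self::new_with_client(Client::shared(), region)
    }

    /// Single construction point; also usable with a custom-built `Client`.
    pub fn new_with_client(client: Client, region: region::Region) -> FooClient {
        FooClient { client, region }
    }
}
```
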
@@ -58,7 +57,7 @@ pub struct AddTagsInput { ///

Contains the output of AddTags.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsOutput {} ///

Contains the parameters for CreatePipeline.

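Since the surrounding hunks walk through the CreatePipeline shapes, here is a hedged sketch of driving them end to end; the `name` and `unique_id` values are illustrative, and `unique_id` is the caller-supplied idempotency token:

```rust
use rusoto_core::Region;
use rusoto_datapipeline::{CreatePipelineInput, DataPipeline, DataPipelineClient};

fn main() {
    let client = DataPipelineClient::new(Region::UsEast1);
    let input = CreatePipelineInput {
        name: "nightly-etl".to_string(),              // illustrative name
        unique_id: "nightly-etl-2019-10".to_string(), // idempotency token
        // description and tags are optional; Default leaves them unset.
        ..Default::default()
    };
    match client.create_pipeline(input).sync() {
        Ok(output) => println!("created: {:?}", output),
        Err(err) => eprintln!("CreatePipeline failed: {}", err),
    }
}
```
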
@@ -82,7 +81,7 @@ pub struct CreatePipelineInput { ///

Contains the output of CreatePipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePipelineOutput { ///

The ID that AWS Data Pipeline assigns the newly created pipeline. For example, df-06372391ZG65EXAMPLE.

#[serde(rename = "pipelineId")] @@ -103,7 +102,7 @@ pub struct DeactivatePipelineInput { ///

Contains the output of DeactivatePipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeactivatePipelineOutput {} ///

Contains the parameters for DeletePipeline.

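The datapipeline Cargo.toml hunk earlier in this diff also repoints the `rusoto_mock` dev-dependency at 0.41.0 with `default-features = false`; that crate is what feeds canned responses through `new_with` in tests. A sketch of the idiom, assuming the generated `ListPipelines` shapes (the JSON body and field names follow the AWS model but are not verified against the full generated file):

```rust
use rusoto_core::Region;
use rusoto_datapipeline::{DataPipeline, DataPipelineClient, ListPipelinesInput};
use rusoto_mock::{MockCredentialsProvider, MockRequestDispatcher};

#[test]
fn list_pipelines_parses_canned_response() {
    // The mock dispatcher short-circuits HTTP and returns this body verbatim.
    let dispatcher = MockRequestDispatcher::default()
        .with_body(r#"{"pipelineIdList": [], "hasMoreResults": false}"#);
    let client =
        DataPipelineClient::new_with(dispatcher, MockCredentialsProvider, Region::UsEast1);
    let output = client
        .list_pipelines(ListPipelinesInput::default())
        .sync()
        .expect("mocked call should succeed");
    assert!(output.pipeline_id_list.is_empty());
}
```
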
@@ -135,7 +134,7 @@ pub struct DescribeObjectsInput { ///

Contains the output of DescribeObjects.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeObjectsOutput { ///

Indicates whether there are more results to return.

#[serde(rename = "hasMoreResults")] @@ -160,7 +159,7 @@ pub struct DescribePipelinesInput { ///

Contains the output of DescribePipelines.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePipelinesOutput { ///

An array of descriptions for the specified pipelines.

#[serde(rename = "pipelineDescriptionList")] @@ -183,7 +182,7 @@ pub struct EvaluateExpressionInput { ///

Contains the output of EvaluateExpression.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EvaluateExpressionOutput { ///

The evaluated expression.

#[serde(rename = "evaluatedExpression")] @@ -220,7 +219,7 @@ pub struct GetPipelineDefinitionInput { ///

Contains the output of GetPipelineDefinition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPipelineDefinitionOutput { ///

The parameter objects used in the pipeline definition.

#[serde(rename = "parameterObjects")] @@ -260,7 +259,7 @@ pub struct ListPipelinesInput { ///

Contains the output of ListPipelines.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPipelinesOutput { ///

Indicates whether there are more results that can be obtained by a subsequent call.

#[serde(rename = "hasMoreResults")] @@ -323,7 +322,7 @@ pub struct ParameterValue { ///

Contains pipeline metadata.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineDescription { ///

Description of the pipeline.

#[serde(rename = "description")] @@ -346,7 +345,7 @@ pub struct PipelineDescription { ///

Contains the name and identifier of a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineIdName { ///

The ID of the pipeline that was assigned by AWS Data Pipeline. This is a string of the form df-297EG78HU43EEXAMPLE.

#[serde(rename = "id")] @@ -390,7 +389,7 @@ pub struct PollForTaskInput { ///

Contains the output of PollForTask.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PollForTaskOutput { ///

The information needed to complete the task that is being assigned to the task runner. One of the fields returned in this object is taskId, which contains an identifier for the task being assigned. The calling task runner uses taskId in subsequent calls to ReportTaskProgress and SetTaskStatus.

#[serde(rename = "taskObject")] @@ -419,7 +418,7 @@ pub struct PutPipelineDefinitionInput { ///

Contains the output of PutPipelineDefinition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutPipelineDefinitionOutput { ///

Indicates whether there were validation errors, and the pipeline definition is stored but cannot be activated until you correct the pipeline and call PutPipelineDefinition to commit the corrected pipeline.

#[serde(rename = "errored")] @@ -468,7 +467,7 @@ pub struct QueryObjectsInput { ///

Contains the output of QueryObjects.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryObjectsOutput { ///

Indicates whether there are more results that can be obtained by a subsequent call.

#[serde(rename = "hasMoreResults")] @@ -497,7 +496,7 @@ pub struct RemoveTagsInput { ///

Contains the output of RemoveTags.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsOutput {} ///

Contains the parameters for ReportTaskProgress.

@@ -514,7 +513,7 @@ pub struct ReportTaskProgressInput { ///

Contains the output of ReportTaskProgress.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReportTaskProgressOutput { ///

If true, the calling task runner should cancel processing of the task. The task runner does not need to call SetTaskStatus for canceled tasks.

#[serde(rename = "canceled")] @@ -539,7 +538,7 @@ pub struct ReportTaskRunnerHeartbeatInput { ///

Contains the output of ReportTaskRunnerHeartbeat.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReportTaskRunnerHeartbeatOutput { ///

Indicates whether the calling task runner should terminate.

#[serde(rename = "terminate")] @@ -597,7 +596,7 @@ pub struct SetTaskStatusInput { ///

Contains the output of SetTaskStatus.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetTaskStatusOutput {} ///

Tags are key/value pairs defined by a user and associated with a pipeline to control access. AWS Data Pipeline allows you to associate ten tags per pipeline. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

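As a concrete use of the `Tag` type documented above, a hedged sketch of tagging a pipeline; the pipeline ID reuses the `df-06372391ZG65EXAMPLE` example from the docs earlier in this file, and the key/value pair is illustrative:

```rust
use rusoto_core::Region;
use rusoto_datapipeline::{AddTagsInput, DataPipeline, DataPipelineClient, Tag};

fn main() {
    let client = DataPipelineClient::new(Region::UsEast1);
    let input = AddTagsInput {
        pipeline_id: "df-06372391ZG65EXAMPLE".to_string(), // example ID from the docs above
        tags: vec![Tag {
            key: "team".to_string(),
            value: "data-eng".to_string(),
        }],
    };
    match client.add_tags(input).sync() {
        Ok(_) => println!("tags applied"),
        Err(err) => eprintln!("AddTags failed: {}", err),
    }
}
```
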
@@ -613,7 +612,7 @@ pub struct Tag { ///

Contains information about a pipeline task that is assigned to a task runner.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskObject { ///

The ID of the pipeline task attempt object. AWS Data Pipeline uses this value to track how many times a task is attempted.

#[serde(rename = "attemptId")] @@ -654,7 +653,7 @@ pub struct ValidatePipelineDefinitionInput { ///

Contains the output of ValidatePipelineDefinition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValidatePipelineDefinitionOutput { ///

Indicates whether there were validation errors.

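The `errored` flag documented above is how callers detect validation failures without inspecting HTTP status codes. A small sketch, assuming the generated field shapes (`errored: bool`, `validation_errors: Option<Vec<ValidationError>>`, and `ValidationError`'s optional `id`/`errors` fields shown below):

```rust
use rusoto_datapipeline::ValidatePipelineDefinitionOutput;

/// Prints validation failures; a stored-but-errored definition cannot be
/// activated until corrected and resubmitted.
fn report(output: &ValidatePipelineDefinitionOutput) {
    if output.errored {
        // `iter().flatten()` walks the Option<Vec<_>> without unwrapping.
        for err in output.validation_errors.iter().flatten() {
            println!("object {:?} failed validation: {:?}", err.id, err.errors);
        }
    }
}
```
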
#[serde(rename = "errored")] @@ -671,7 +670,7 @@ pub struct ValidatePipelineDefinitionOutput { ///

Defines a validation error. Validation errors prevent pipeline activation. The set of validation errors that can be returned are defined by AWS Data Pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValidationError { ///

A description of the validation error.

#[serde(rename = "errors")] @@ -685,7 +684,7 @@ pub struct ValidationError { ///

Defines a validation warning. Validation warnings do not prevent pipeline activation. The set of validation warnings that can be returned are defined by AWS Data Pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValidationWarning { ///

The identifier of the object that contains the validation warning.

#[serde(rename = "id")] @@ -1797,10 +1796,7 @@ impl DataPipelineClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DataPipelineClient { - DataPipelineClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1814,10 +1810,14 @@ impl DataPipelineClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DataPipelineClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DataPipelineClient { + DataPipelineClient { client, region } } } diff --git a/rusoto/services/dax/Cargo.toml b/rusoto/services/dax/Cargo.toml index 78438c4059b..6fcc0568478 100644 --- a/rusoto/services/dax/Cargo.toml +++ b/rusoto/services/dax/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_dax" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/dax/README.md b/rusoto/services/dax/README.md index 258a8f21be1..d775814a359 100644 --- a/rusoto/services/dax/README.md +++ b/rusoto/services/dax/README.md @@ -23,9 +23,16 @@ To use `rusoto_dax` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_dax = "0.40.0" +rusoto_dax = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/dax/src/custom/mod.rs b/rusoto/services/dax/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/dax/src/custom/mod.rs +++ b/rusoto/services/dax/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/dax/src/generated.rs b/rusoto/services/dax/src/generated.rs index 7cb61c3dd03..ea20619b74a 100644 --- a/rusoto/services/dax/src/generated.rs +++ b/rusoto/services/dax/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Contains all of the attributes of a specific DAX cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Cluster { ///

The number of nodes in the cluster that are active (i.e., capable of serving requests).

#[serde(rename = "ActiveNodes")] @@ -151,7 +150,7 @@ pub struct CreateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResponse { ///

A description of the DAX cluster that you have created.

#[serde(rename = "Cluster")] @@ -171,7 +170,7 @@ pub struct CreateParameterGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateParameterGroupResponse { ///

Represents the output of a CreateParameterGroup action.

#[serde(rename = "ParameterGroup")] @@ -194,7 +193,7 @@ pub struct CreateSubnetGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSubnetGroupResponse { ///

Represents the output of a CreateSubnetGroup operation.

#[serde(rename = "SubnetGroup")] @@ -221,7 +220,7 @@ pub struct DecreaseReplicationFactorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecreaseReplicationFactorResponse { ///

A description of the DAX cluster, after you have decreased its replication factor.

#[serde(rename = "Cluster")] @@ -237,7 +236,7 @@ pub struct DeleteClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClusterResponse { ///

A description of the DAX cluster that is being deleted.

#[serde(rename = "Cluster")] @@ -253,7 +252,7 @@ pub struct DeleteParameterGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteParameterGroupResponse { ///

A user-specified message for this action (i.e., a reason for deleting the parameter group).

#[serde(rename = "DeletionMessage")] @@ -269,7 +268,7 @@ pub struct DeleteSubnetGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSubnetGroupResponse { ///

A user-specified message for this action (i.e., a reason for deleting the subnet group).

#[serde(rename = "DeletionMessage")] @@ -294,7 +293,7 @@ pub struct DescribeClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClustersResponse { ///

The descriptions of your DAX clusters, in response to a DescribeClusters request.

#[serde(rename = "Clusters")] @@ -319,7 +318,7 @@ pub struct DescribeDefaultParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDefaultParametersResponse { ///

Provides an identifier to allow retrieval of paginated results.

#[serde(rename = "NextToken")] @@ -364,7 +363,7 @@ pub struct DescribeEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventsResponse { ///

An array of events. Each element in the array represents one event.

#[serde(rename = "Events")] @@ -393,7 +392,7 @@ pub struct DescribeParameterGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeParameterGroupsResponse { ///

Provides an identifier to allow retrieval of paginated results.

#[serde(rename = "NextToken")] @@ -425,7 +424,7 @@ pub struct DescribeParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeParametersResponse { ///

Provides an identifier to allow retrieval of paginated results.

#[serde(rename = "NextToken")] @@ -454,7 +453,7 @@ pub struct DescribeSubnetGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSubnetGroupsResponse { ///

Provides an identifier to allow retrieval of paginated results.

#[serde(rename = "NextToken")] @@ -468,7 +467,7 @@ pub struct DescribeSubnetGroupsResponse { ///

Represents the information required for client programs to connect to the configuration endpoint for a DAX cluster, or to an individual node within the cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Endpoint { ///

The DNS hostname of the endpoint.

#[serde(rename = "Address")] @@ -482,7 +481,7 @@ pub struct Endpoint { ///

Represents a single occurrence of something interesting within the system. Some examples of events are creating a DAX cluster, adding or removing a node, or rebooting a node.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Event { ///

The date and time when the event occurred.

#[serde(rename = "Date")] @@ -517,7 +516,7 @@ pub struct IncreaseReplicationFactorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IncreaseReplicationFactorResponse { ///

A description of the DAX cluster, with its new replication factor.

#[serde(rename = "Cluster")] @@ -537,7 +536,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

If this value is present, there are additional results to be displayed. To retrieve them, call ListTags again, with NextToken set to this value.

#[serde(rename = "NextToken")] @@ -551,7 +550,7 @@ pub struct ListTagsResponse { ///

Represents an individual node within a DAX cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Node { ///

The Availability Zone (AZ) in which the node has been deployed.

#[serde(rename = "AvailabilityZone")] @@ -581,7 +580,7 @@ pub struct Node { ///

Represents a parameter value that is applicable to a particular node type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NodeTypeSpecificValue { ///

A node type to which the parameter value applies.

#[serde(rename = "NodeType")] @@ -595,7 +594,7 @@ pub struct NodeTypeSpecificValue { ///

Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotificationConfiguration { ///

The Amazon Resource Name (ARN) that identifies the topic.

#[serde(rename = "TopicArn")] @@ -609,7 +608,7 @@ pub struct NotificationConfiguration { ///

Describes an individual setting that controls some aspect of DAX behavior.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Parameter { ///

A range of values within which the parameter can be set.

#[serde(rename = "AllowedValues")] @@ -655,7 +654,7 @@ pub struct Parameter { ///

A named set of parameters that are applied to all of the nodes in a DAX cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ParameterGroup { ///

A description of the parameter group.

#[serde(rename = "Description")] @@ -669,7 +668,7 @@ pub struct ParameterGroup { ///

The status of a parameter group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ParameterGroupStatus { ///

The node IDs of one or more nodes to be rebooted.

#[serde(rename = "NodeIdsToReboot")] @@ -709,7 +708,7 @@ pub struct RebootNodeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootNodeResponse { ///

A description of the DAX cluster after a node has been rebooted.

#[serde(rename = "Cluster")] @@ -719,7 +718,7 @@ pub struct RebootNodeResponse { ///

The description of the server-side encryption status on the specified DAX cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SSEDescription { ///

The current state of server-side encryption:

  • ENABLING - Server-side encryption is being enabled.

  • ENABLED - Server-side encryption is enabled.

  • DISABLING - Server-side encryption is being disabled.

  • DISABLED - Server-side encryption is disabled.

#[serde(rename = "Status")] @@ -737,7 +736,7 @@ pub struct SSESpecification { ///

An individual VPC security group and its status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityGroupMembership { ///

The unique ID for this security group.

#[serde(rename = "SecurityGroupIdentifier")] @@ -751,7 +750,7 @@ pub struct SecurityGroupMembership { ///

Represents the subnet associated with a DAX cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with DAX.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Subnet { ///

The Availability Zone (AZ) for the subnet.

#[serde(rename = "SubnetAvailabilityZone")] @@ -765,7 +764,7 @@ pub struct Subnet { ///

Represents the output of one of the following actions:

  • CreateSubnetGroup

  • ModifySubnetGroup

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubnetGroup { ///

The description of the subnet group.

#[serde(rename = "Description")] @@ -809,7 +808,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse { ///

The list of tags that are associated with the DAX resource.

#[serde(rename = "Tags")] @@ -828,7 +827,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse { ///

The tag keys that have been removed from the cluster.

#[serde(rename = "Tags")] @@ -868,7 +867,7 @@ pub struct UpdateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateClusterResponse { ///

A description of the DAX cluster, after it has been modified.

#[serde(rename = "Cluster")] @@ -887,7 +886,7 @@ pub struct UpdateParameterGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateParameterGroupResponse { ///

The parameter group that has been modified.

#[serde(rename = "ParameterGroup")] @@ -911,7 +910,7 @@ pub struct UpdateSubnetGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSubnetGroupResponse { ///

The subnet group that has been modified.

#[serde(rename = "SubnetGroup")] @@ -2503,10 +2502,7 @@ impl DynamodbAcceleratorClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DynamodbAcceleratorClient { - DynamodbAcceleratorClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2520,10 +2516,14 @@ impl DynamodbAcceleratorClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DynamodbAcceleratorClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DynamodbAcceleratorClient { + DynamodbAcceleratorClient { client, region } } } diff --git a/rusoto/services/devicefarm/Cargo.toml b/rusoto/services/devicefarm/Cargo.toml index 4a2af32b1a5..273074ddd75 100644 --- a/rusoto/services/devicefarm/Cargo.toml +++ b/rusoto/services/devicefarm/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_devicefarm" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/devicefarm/README.md b/rusoto/services/devicefarm/README.md index 00020a3f852..6c614b6f7ba 100644 --- a/rusoto/services/devicefarm/README.md +++ b/rusoto/services/devicefarm/README.md @@ -23,9 +23,16 @@ To use `rusoto_devicefarm` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_devicefarm = "0.40.0" +rusoto_devicefarm = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/devicefarm/src/custom/mod.rs b/rusoto/services/devicefarm/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/devicefarm/src/custom/mod.rs +++ b/rusoto/services/devicefarm/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/devicefarm/src/generated.rs b/rusoto/services/devicefarm/src/generated.rs index 4f75ccd4052..7504e053001 100644 --- a/rusoto/services/devicefarm/src/generated.rs +++ b/rusoto/services/devicefarm/src/generated.rs @@ -9,30 +9,29 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

A container for account-level settings within AWS Device Farm.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccountSettings { ///

The AWS account number specified in the AccountSettings container.

#[serde(rename = "awsAccountNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub aws_account_number: Option, - ///

The default number of minutes (at the account level) a test run will execute before it times out. Default value is 60 minutes.

+ ///

The default number of minutes (at the account level) a test run will execute before it times out. The default value is 150 minutes.

#[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option, @@ -64,7 +63,7 @@ pub struct AccountSettings { ///

Represents the output of a test. Examples of artifacts include logs and screenshots.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Artifact { ///

The artifact's ARN.

#[serde(rename = "arn")] @@ -78,7 +77,7 @@ pub struct Artifact { #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, - ///

The artifact's type.

Allowed values include the following:

  • UNKNOWN: An unknown type.

  • SCREENSHOT: The screenshot type.

  • DEVICELOG: The device log type.

  • MESSAGELOG: The message log type.

  • VIDEOLOG: The video log type.

  • RESULTLOG: The result log type.

  • SERVICELOG: The service log type.

  • WEBKITLOG: The web kit log type.

  • INSTRUMENTATIONOUTPUT: The instrumentation type.

  • EXERCISERMONKEYOUTPUT: For Android, the artifact (log) generated by an Android fuzz test.

  • CALABASHJSONOUTPUT: The Calabash JSON output type.

  • CALABASHPRETTYOUTPUT: The Calabash pretty output type.

  • CALABASHSTANDARDOUTPUT: The Calabash standard output type.

  • CALABASHJAVAXMLOUTPUT: The Calabash Java XML output type.

  • AUTOMATIONOUTPUT: The automation output type.

  • APPIUMSERVEROUTPUT: The Appium server output type.

  • APPIUMJAVAOUTPUT: The Appium Java output type.

  • APPIUMJAVAXMLOUTPUT: The Appium Java XML output type.

  • APPIUMPYTHONOUTPUT: The Appium Python output type.

  • APPIUMPYTHONXMLOUTPUT: The Appium Python XML output type.

  • EXPLOREREVENTLOG: The Explorer event log output type.

  • EXPLORERSUMMARYLOG: The Explorer summary log output type.

  • APPLICATIONCRASHREPORT: The application crash report output type.

  • XCTESTLOG: The XCode test output type.

  • VIDEO: The Video output type.

  • CUSTOMERARTIFACT:The Customer Artifact output type.

  • CUSTOMERARTIFACTLOG: The Customer Artifact Log output type.

  • TESTSPECOUTPUT: The Test Spec Output type.

+ ///

The artifact's type.

Allowed values include the following:

  • UNKNOWN: An unknown type.

  • SCREENSHOT: The screenshot type.

  • DEVICELOG: The device log type.

  • MESSAGELOG: The message log type.

  • VIDEOLOG: The video log type.

  • RESULTLOG: The result log type.

  • SERVICELOG: The service log type.

  • WEBKITLOG: The web kit log type.

  • INSTRUMENTATIONOUTPUT: The instrumentation type.

  • EXERCISERMONKEYOUTPUT: For Android, the artifact (log) generated by an Android fuzz test.

  • CALABASHJSONOUTPUT: The Calabash JSON output type.

  • CALABASHPRETTYOUTPUT: The Calabash pretty output type.

  • CALABASHSTANDARDOUTPUT: The Calabash standard output type.

  • CALABASHJAVAXMLOUTPUT: The Calabash Java XML output type.

  • AUTOMATIONOUTPUT: The automation output type.

  • APPIUMSERVEROUTPUT: The Appium server output type.

  • APPIUMJAVAOUTPUT: The Appium Java output type.

  • APPIUMJAVAXMLOUTPUT: The Appium Java XML output type.

  • APPIUMPYTHONOUTPUT: The Appium Python output type.

  • APPIUMPYTHONXMLOUTPUT: The Appium Python XML output type.

  • EXPLOREREVENTLOG: The Explorer event log output type.

  • EXPLORERSUMMARYLOG: The Explorer summary log output type.

  • APPLICATIONCRASHREPORT: The application crash report output type.

  • XCTESTLOG: The Xcode test output type.

  • VIDEO: The Video output type.

  • CUSTOMERARTIFACT:The Customer Artifact output type.

  • CUSTOMERARTIFACTLOG: The Customer Artifact Log output type.

  • TESTSPECOUTPUT: The Test Spec Output type.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -90,7 +89,7 @@ pub struct Artifact { ///

Represents the amount of CPU that an app is using on a physical device.

Note that this does not represent system-wide CPU usage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CPU { ///

The CPU's architecture, for example x86 or ARM.

#[serde(rename = "architecture")] @@ -108,7 +107,7 @@ pub struct CPU { ///

Represents entity counters.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Counters { ///

The number of errored entities.

#[serde(rename = "errored")] @@ -164,7 +163,7 @@ pub struct CreateDevicePoolRequest { ///

Represents the result of a create device pool request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDevicePoolResult { ///

The newly created device pool.

#[serde(rename = "devicePool")] @@ -196,7 +195,7 @@ pub struct CreateInstanceProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInstanceProfileResult { ///

An object containing information about your instance profile.

#[serde(rename = "instanceProfile")] @@ -255,7 +254,7 @@ pub struct CreateNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNetworkProfileResult { ///

The network profile that is returned by the create network profile request.

#[serde(rename = "networkProfile")] @@ -277,7 +276,7 @@ pub struct CreateProjectRequest { ///

Represents the result of a create project request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProjectResult { ///

The newly created project.

#[serde(rename = "project")] @@ -351,7 +350,7 @@ pub struct CreateRemoteAccessSessionRequest { ///

Represents the server response from a request to create a remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRemoteAccessSessionResult { ///

A container that describes the remote access session when the request to create a remote access session is sent.

#[serde(rename = "remoteAccessSession")] @@ -372,14 +371,14 @@ pub struct CreateUploadRequest { ///

The ARN of the project for the upload.

#[serde(rename = "projectArn")] pub project_arn: String, - ///

The upload's upload type.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An XCode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

+ ///

The upload's upload type.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An Xcode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload.

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

#[serde(rename = "type")] pub type_: String, } ///

Represents the result of a create upload request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUploadResult { ///

The newly created upload.
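The recurring attribute change in this file swaps the test-only Serialize derive for one gated on either tests or an opt-in `serialize_structs` Cargo feature, so downstream crates can serialize response structs. A minimal sketch of opting in (the feature name comes from this diff; the version number and the use of serde_json are illustrative):

```rust
// Cargo.toml (version illustrative):
// rusoto_devicefarm = { version = "0.41", features = ["serialize_structs"] }
// serde_json = "1"

use rusoto_devicefarm::CreateUploadResult;

fn main() {
    // With the feature enabled, output structs derive Serialize, so a
    // response can be re-encoded as JSON, e.g. for logging or caching.
    let result = CreateUploadResult::default();
    println!("{}", serde_json::to_string_pretty(&result).expect("serializable"));
}
```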

#[serde(rename = "upload")] @@ -405,7 +404,7 @@ pub struct CreateVPCEConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateVPCEConfigurationResult { ///

An object containing information about your VPC endpoint configuration.

#[serde(rename = "vpceConfiguration")] @@ -440,7 +439,7 @@ pub struct DeleteDevicePoolRequest { ///

Represents the result of a delete device pool request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDevicePoolResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -451,7 +450,7 @@ pub struct DeleteInstanceProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInstanceProfileResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -462,7 +461,7 @@ pub struct DeleteNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteNetworkProfileResult {} ///

Represents a request to the delete project operation.

@@ -475,20 +474,20 @@ pub struct DeleteProjectRequest { ///

Represents the result of a delete project request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProjectResult {} ///

Represents the request to delete the specified remote access session.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteRemoteAccessSessionRequest { - ///

The Amazon Resource Name (ARN) of the sesssion for which you want to delete remote access.

+ ///

The Amazon Resource Name (ARN) of the session for which you want to delete remote access.

#[serde(rename = "arn")] pub arn: String, } ///

The response from the server when a request is made to delete the remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRemoteAccessSessionResult {} ///

Represents a request to the delete run operation.

@@ -501,7 +500,7 @@ pub struct DeleteRunRequest { ///

Represents the result of a delete run request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRunResult {} ///

Represents a request to the delete upload operation.

@@ -514,7 +513,7 @@ pub struct DeleteUploadRequest { ///

Represents the result of a delete upload request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUploadResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -525,12 +524,12 @@ pub struct DeleteVPCEConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteVPCEConfigurationResult {} ///

Represents a device type that an app is tested against.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Device { ///

The device's ARN.

#[serde(rename = "arn")] @@ -637,7 +636,7 @@ pub struct DeviceFilter { ///

Represents the device instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceInstance { ///

The Amazon Resource Name (ARN) of the device instance.

#[serde(rename = "arn")] @@ -667,7 +666,7 @@ pub struct DeviceInstance { ///

Represents the total (metered or unmetered) minutes used by the resource to run tests. Contains the sum of minutes consumed by all children.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceMinutes { ///

When specified, represents only the sum of metered minutes used by the resource to run tests.

#[serde(rename = "metered")] @@ -685,7 +684,7 @@ pub struct DeviceMinutes { ///

Represents a collection of device types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DevicePool { ///

The device pool's ARN.

#[serde(rename = "arn")] @@ -715,7 +714,7 @@ pub struct DevicePool { ///

Represents a device pool compatibility result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DevicePoolCompatibilityResult { ///

Whether the result was compatible with the device pool.

#[serde(rename = "compatible")] @@ -744,7 +743,7 @@ pub struct DeviceSelectionConfiguration { ///

Contains the run results requested by the device selection configuration as well as how many devices were returned. For an example of the JSON response syntax, see ScheduleRun.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceSelectionResult { ///

The filters in a device selection result.

#[serde(rename = "filters")] @@ -791,7 +790,7 @@ pub struct GetAccountSettingsRequest {} ///

Represents the account settings return values from the GetAccountSettings request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAccountSettingsResult { ///

The account settings.

#[serde(rename = "accountSettings")] @@ -807,7 +806,7 @@ pub struct GetDeviceInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeviceInstanceResult { ///

An object containing information about your device instance.

#[serde(rename = "deviceInstance")] @@ -833,7 +832,7 @@ pub struct GetDevicePoolCompatibilityRequest { #[serde(rename = "test")] #[serde(skip_serializing_if = "Option::is_none")] pub test: Option, - ///

The test type for the specified device pool.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The test type for the specified device pool.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.
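As a quick illustration (not part of the diff), here is a sketch of a compatibility check using one of those test types; both ARNs are placeholders:

```rust
use rusoto_core::Region;
use rusoto_devicefarm::{DeviceFarm, DeviceFarmClient, GetDevicePoolCompatibilityRequest};

fn main() {
    let client = DeviceFarmClient::new(Region::UsWest2);
    let request = GetDevicePoolCompatibilityRequest {
        device_pool_arn: "arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE".to_owned(),
        app_arn: Some("arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE".to_owned()),
        // One of the allowed values listed above.
        test_type: Some("APPIUM_JAVA_JUNIT".to_owned()),
        ..Default::default()
    };
    match client.get_device_pool_compatibility(request).sync() {
        Ok(result) => {
            // Each entry pairs a device with a compatibility verdict.
            for entry in result.compatible_devices.unwrap_or_default() {
                println!("compatible: {:?}", entry.device.and_then(|d| d.name));
            }
        }
        Err(e) => eprintln!("get_device_pool_compatibility failed: {}", e),
    }
}
```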

#[serde(rename = "testType")] #[serde(skip_serializing_if = "Option::is_none")] pub test_type: Option, @@ -841,7 +840,7 @@ pub struct GetDevicePoolCompatibilityRequest { ///

Represents the result of describe device pool compatibility request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDevicePoolCompatibilityResult { ///

Information about compatible devices.

#[serde(rename = "compatibleDevices")] @@ -863,7 +862,7 @@ pub struct GetDevicePoolRequest { ///

Represents the result of a get device pool request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDevicePoolResult { ///

An object containing information about the requested device pool.

#[serde(rename = "devicePool")] @@ -881,7 +880,7 @@ pub struct GetDeviceRequest { ///

Represents the result of a get device request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeviceResult { ///

An object containing information about the requested device.

#[serde(rename = "device")] @@ -897,7 +896,7 @@ pub struct GetInstanceProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceProfileResult { ///

An object containing information about your instance profile.

#[serde(rename = "instanceProfile")] @@ -915,7 +914,7 @@ pub struct GetJobRequest { ///

Represents the result of a get job request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobResult { ///

An object containing information about the requested job.

#[serde(rename = "job")] @@ -931,7 +930,7 @@ pub struct GetNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetNetworkProfileResult { ///

The network profile.

#[serde(rename = "networkProfile")] @@ -950,7 +949,7 @@ pub struct GetOfferingStatusRequest { ///

Returns the status result for a device offering.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOfferingStatusResult { ///

When specified, gets the offering status for the current period.

#[serde(rename = "current")] @@ -976,7 +975,7 @@ pub struct GetProjectRequest { ///

Represents the result of a get project request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetProjectResult { ///

The project you wish to get information about.

#[serde(rename = "project")] @@ -994,7 +993,7 @@ pub struct GetRemoteAccessSessionRequest { ///

Represents the response from the server that lists detailed information about the remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRemoteAccessSessionResult { ///

A container that lists detailed information about the remote access session.

#[serde(rename = "remoteAccessSession")] @@ -1012,7 +1011,7 @@ pub struct GetRunRequest { ///

Represents the result of a get run request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRunResult { ///

The run you wish to get results from.

#[serde(rename = "run")] @@ -1030,7 +1029,7 @@ pub struct GetSuiteRequest { ///

Represents the result of a get suite request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSuiteResult { ///

A collection of one or more tests.

#[serde(rename = "suite")] @@ -1048,7 +1047,7 @@ pub struct GetTestRequest { ///

Represents the result of a get test request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTestResult { ///

A test condition that is evaluated.

#[serde(rename = "test")] @@ -1066,7 +1065,7 @@ pub struct GetUploadRequest { ///

Represents the result of a get upload request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUploadResult { ///

An app or a set of one or more tests to upload or that have been uploaded.

#[serde(rename = "upload")] @@ -1082,7 +1081,7 @@ pub struct GetVPCEConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVPCEConfigurationResult { ///

An object containing information about your VPC endpoint configuration.

#[serde(rename = "vpceConfiguration")] @@ -1092,7 +1091,7 @@ pub struct GetVPCEConfigurationResult { ///

Represents information about incompatibility.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IncompatibilityMessage { ///

A message about the incompatibility.

#[serde(rename = "message")] @@ -1117,7 +1116,7 @@ pub struct InstallToRemoteAccessSessionRequest { ///

Represents the response from the server after AWS Device Farm makes a request to install to a remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstallToRemoteAccessSessionResult { ///

An app to upload or that has been uploaded.

#[serde(rename = "appUpload")] @@ -1127,7 +1126,7 @@ pub struct InstallToRemoteAccessSessionResult { ///

Represents the instance profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceProfile { ///

The Amazon Resource Name (ARN) of the instance profile.

#[serde(rename = "arn")] @@ -1157,7 +1156,7 @@ pub struct InstanceProfile { ///

Represents a device.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Job { ///

The job's ARN.

#[serde(rename = "arn")] @@ -1207,7 +1206,7 @@ pub struct Job { #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option, - ///

The job's type.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby test type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The job's type.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby test type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -1238,7 +1237,7 @@ pub struct ListArtifactsRequest { ///

Represents the result of a list artifacts operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListArtifactsResult { ///

Information about the artifacts.

#[serde(rename = "artifacts")] @@ -1263,7 +1262,7 @@ pub struct ListDeviceInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeviceInstancesResult { ///

An object containing information about your device instances.

#[serde(rename = "deviceInstances")] @@ -1293,7 +1292,7 @@ pub struct ListDevicePoolsRequest { ///

Represents the result of a list device pools request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevicePoolsResult { ///

Information about the device pools.

#[serde(rename = "devicePools")] @@ -1312,7 +1311,7 @@ pub struct ListDevicesRequest { #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option, - ///

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute: The aspect of a device such as platform or model used as the selction criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example".

    • PLATFORM: The device platform. Valid values are "ANDROID" or "IOS".

    • OS_VERSION: The operating system version. For example, "10.3.2".

    • MODEL: The device model. For example, "iPad 5th Gen".

    • AVAILABILITY: The current availability of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE".

    • FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET".

    • MANUFACTURER: The device manufacturer. For example, "Apple".

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are "TRUE" or "FALSE".

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are "TRUE" or "FALSE".

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE".

  • Operator: The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values: An array of one or more filter values.

    • The IN and NOT_IN operators take a values array that has one or more elements.

    • The other operators require an array with a single element.

    • In a request, the AVAILABILITY attribute takes "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE" as values.

+ ///

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute: The aspect of a device such as platform or model used as the selection criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example".

    • PLATFORM: The device platform. Valid values are "ANDROID" or "IOS".

    • OS_VERSION: The operating system version. For example, "10.3.2".

    • MODEL: The device model. For example, "iPad 5th Gen".

    • AVAILABILITY: The current availability of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE".

    • FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET".

    • MANUFACTURER: The device manufacturer. For example, "Apple".

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are "TRUE" or "FALSE".

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are "TRUE" or "FALSE".

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE".

  • Operator: The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values: An array of one or more filter values.

    • The IN and NOT_IN operators take a values array that has one or more elements.

    • The other operators require an array with a single element.

    • In a request, the AVAILABILITY attribute takes "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE" as values.
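To make the attribute/operator/values triple concrete, here is a sketch (not part of the diff) of listing Android devices with a single filter; the chosen attribute and values are just examples:

```rust
use rusoto_core::Region;
use rusoto_devicefarm::{DeviceFarm, DeviceFarmClient, DeviceFilter, ListDevicesRequest};

fn main() {
    let client = DeviceFarmClient::new(Region::UsWest2);
    // One attribute + operator + values triple, as described above.
    let filter = DeviceFilter {
        attribute: Some("PLATFORM".to_owned()),
        operator: Some("EQUALS".to_owned()),
        values: Some(vec!["ANDROID".to_owned()]),
        ..Default::default()
    };
    let request = ListDevicesRequest {
        filters: Some(vec![filter]),
        ..Default::default()
    };
    match client.list_devices(request).sync() {
        Ok(result) => {
            for device in result.devices.unwrap_or_default() {
                println!("{:?} ({:?})", device.name, device.os);
            }
        }
        Err(e) => eprintln!("list_devices failed: {}", e),
    }
}
```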

#[serde(rename = "filters")] #[serde(skip_serializing_if = "Option::is_none")] pub filters: Option>, @@ -1324,7 +1323,7 @@ pub struct ListDevicesRequest { ///

Represents the result of a list devices operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevicesResult { ///

Information about the devices.

#[serde(rename = "devices")] @@ -1349,7 +1348,7 @@ pub struct ListInstanceProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInstanceProfilesResult { ///

An object containing information about your instance profiles.

#[serde(rename = "instanceProfiles")] @@ -1375,7 +1374,7 @@ pub struct ListJobsRequest { ///

Represents the result of a list jobs request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResult { ///

Information about the jobs.

#[serde(rename = "jobs")] @@ -1403,7 +1402,7 @@ pub struct ListNetworkProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNetworkProfilesResult { ///

A list of the available network profiles.

#[serde(rename = "networkProfiles")] @@ -1424,7 +1423,7 @@ pub struct ListOfferingPromotionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOfferingPromotionsResult { ///

An identifier to be used in the next call to this operation, to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1447,7 +1446,7 @@ pub struct ListOfferingTransactionsRequest { ///

Returns the transaction log of the specified offerings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOfferingTransactionsResult { ///

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1470,7 +1469,7 @@ pub struct ListOfferingsRequest { ///

Represents the return values of the list of offerings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOfferingsResult { ///

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1497,7 +1496,7 @@ pub struct ListProjectsRequest { ///

Represents the result of a list projects request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProjectsResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.
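Several of these List* results share this nextToken pattern. A small sketch (not part of the diff) of draining one such listing, with list_projects chosen arbitrarily:

```rust
use rusoto_core::Region;
use rusoto_devicefarm::{DeviceFarm, DeviceFarmClient, ListProjectsRequest};

fn main() {
    let client = DeviceFarmClient::new(Region::UsWest2);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListProjectsRequest {
            // Feed the previous page's token back in; None fetches the first page.
            next_token: next_token.clone(),
            ..Default::default()
        };
        let result = client.list_projects(request).sync().expect("list_projects failed");
        for project in result.projects.unwrap_or_default() {
            println!("{:?}", project.name);
        }
        next_token = result.next_token;
        if next_token.is_none() {
            break; // No token means this was the last page.
        }
    }
}
```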

#[serde(rename = "nextToken")] @@ -1523,7 +1522,7 @@ pub struct ListRemoteAccessSessionsRequest { ///

Represents the response from the server after AWS Device Farm makes a request to return information about the remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRemoteAccessSessionsResult { ///

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1549,7 +1548,7 @@ pub struct ListRunsRequest { ///

Represents the result of a list runs request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRunsResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1575,7 +1574,7 @@ pub struct ListSamplesRequest { ///

Represents the result of a list samples request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSamplesResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1601,7 +1600,7 @@ pub struct ListSuitesRequest { ///

Represents the result of a list suites request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSuitesResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1621,7 +1620,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

#[serde(rename = "Tags")] @@ -1643,7 +1642,7 @@ pub struct ListTestsRequest { ///

Represents the result of a list tests request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTestsResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1669,7 +1668,7 @@ pub struct ListUniqueProblemsRequest { ///

Represents the result of a list unique problems request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUniqueProblemsResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1691,7 +1690,7 @@ pub struct ListUploadsRequest { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The type of upload.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web appliction upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An XCode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.

+ ///

The type of upload.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An Xcode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload.
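As a sketch (not part of the diff), narrowing a listing to one of those types; the project ARN and chosen type are placeholders:

```rust
use rusoto_core::Region;
use rusoto_devicefarm::{DeviceFarm, DeviceFarmClient, ListUploadsRequest};

fn main() {
    let client = DeviceFarmClient::new(Region::UsWest2);
    let request = ListUploadsRequest {
        arn: "arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE".to_owned(),
        // Only return uploads of one of the allowed values above.
        type_: Some("APPIUM_JAVA_JUNIT_TEST_PACKAGE".to_owned()),
        ..Default::default()
    };
    match client.list_uploads(request).sync() {
        Ok(result) => {
            for upload in result.uploads.unwrap_or_default() {
                println!("{:?} -> {:?}", upload.name, upload.status);
            }
        }
        Err(e) => eprintln!("list_uploads failed: {}", e),
    }
}
```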

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -1699,7 +1698,7 @@ pub struct ListUploadsRequest { ///

Represents the result of a list uploads request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUploadsResult { ///

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1724,7 +1723,7 @@ pub struct ListVPCEConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListVPCEConfigurationsResult { ///

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

#[serde(rename = "nextToken")] @@ -1749,7 +1748,7 @@ pub struct Location { ///

A number representing the monetary amount for an offering or transaction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MonetaryAmount { ///

The numerical amount of an offering or transaction.

#[serde(rename = "amount")] @@ -1763,7 +1762,7 @@ pub struct MonetaryAmount { ///

An array of settings that describes characteristics of a network profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkProfile { ///

The Amazon Resource Name (ARN) of the network profile.

#[serde(rename = "arn")] @@ -1817,7 +1816,7 @@ pub struct NetworkProfile { ///

Represents the metadata of a device offering.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Offering { ///

A string describing the offering.

#[serde(rename = "description")] @@ -1843,7 +1842,7 @@ pub struct Offering { ///

Represents information about an offering promotion.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OfferingPromotion { ///

A string describing the offering promotion.

#[serde(rename = "description")] @@ -1857,7 +1856,7 @@ pub struct OfferingPromotion { ///

The status of the offering.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OfferingStatus { ///

The date on which the offering is effective.

#[serde(rename = "effectiveOn")] @@ -1879,7 +1878,7 @@ pub struct OfferingStatus { ///

Represents the metadata of an offering transaction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OfferingTransaction { ///

The cost of an offering transaction.

#[serde(rename = "cost")] @@ -1905,7 +1904,7 @@ pub struct OfferingTransaction { ///

Represents a specific warning or failure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Problem { ///

Information about the associated device.

#[serde(rename = "device")] @@ -1939,7 +1938,7 @@ pub struct Problem { ///

Information about a problem detail.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProblemDetail { ///

The problem detail's ARN.

#[serde(rename = "arn")] @@ -1953,7 +1952,7 @@ pub struct ProblemDetail { ///

Represents an operating-system neutral workspace for running and managing tests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Project { ///

The project's ARN.

#[serde(rename = "arn")] @@ -1963,7 +1962,7 @@ pub struct Project { #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option, - ///

The default number of minutes (at the project level) a test run will execute before it times out. Default value is 60 minutes.

+ ///

The default number of minutes (at the project level) a test run will execute before it times out. The default value is 150 minutes.

#[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option, @@ -1992,7 +1991,7 @@ pub struct PurchaseOfferingRequest { ///

The result of the purchase offering (e.g., success or failure).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PurchaseOfferingResult { ///

Represents the offering transaction for the purchase result.

#[serde(rename = "offeringTransaction")] @@ -2023,7 +2022,7 @@ pub struct Radios { ///

Specifies whether charges for devices will be recurring.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecurringCharge { ///

The cost of the recurring charge.

#[serde(rename = "cost")] @@ -2037,7 +2036,7 @@ pub struct RecurringCharge { ///

Represents information about the remote access session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoteAccessSession { ///

The Amazon Resource Name (ARN) of the remote access session.

#[serde(rename = "arn")] @@ -2059,7 +2058,7 @@ pub struct RemoteAccessSession { #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option, - ///

The number of minutes a device is used in a remote access sesssion (including setup and teardown minutes).

+ ///

The number of minutes a device is used in a remote access session (including setup and teardown minutes).

#[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option, @@ -2140,7 +2139,7 @@ pub struct RenewOfferingRequest { ///

The result of a renewal offering.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RenewOfferingResult { ///

Represents the status of the offering transaction for the renewal.

#[serde(rename = "offeringTransaction")] @@ -2150,7 +2149,7 @@ pub struct RenewOfferingResult { ///

Represents the screen resolution of a device in height and width, expressed in pixels.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resolution { ///

The screen resolution's height, expressed in pixels.

#[serde(rename = "height")] @@ -2181,7 +2180,7 @@ pub struct Rule { ///

Represents a test run on a set of devices with a given app package, test parameters, etc.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Run { ///

An app to upload or that has been uploaded.

#[serde(rename = "appUpload")] @@ -2299,7 +2298,7 @@ pub struct Run { #[serde(rename = "totalJobs")] #[serde(skip_serializing_if = "Option::is_none")] pub total_jobs: Option, - ///

The run's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The run's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -2311,7 +2310,7 @@ pub struct Run { ///

Represents a sample of performance data.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Sample { ///

The sample's ARN.

#[serde(rename = "arn")] @@ -2405,7 +2404,7 @@ pub struct ScheduleRunRequest { ///

Represents the result of a schedule run request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScheduleRunResult { ///

Information about the scheduled run.

#[serde(rename = "run")] @@ -2432,7 +2431,7 @@ pub struct ScheduleRunTest { #[serde(rename = "testSpecArn")] #[serde(skip_serializing_if = "Option::is_none")] pub test_spec_arn: Option, - ///

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.
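Since ScheduleRunTest is the struct carrying this type field, here is a small sketch (not part of the diff) of filling it in; the upload ARN is a placeholder and the type value is just one of the allowed list:

```rust
use rusoto_devicefarm::ScheduleRunTest;

fn main() {
    // A test declaration for a ScheduleRun or GetDevicePoolCompatibility call;
    // per the struct above, `type_` is the only required field.
    let test = ScheduleRunTest {
        test_package_arn: Some(
            "arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE".to_owned(),
        ),
        type_: "APPIUM_JAVA_JUNIT".to_owned(),
        ..Default::default()
    };
    println!("{:?}", test);
}
```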

#[serde(rename = "type")] pub type_: String, } @@ -2445,7 +2444,7 @@ pub struct StopJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopJobResult { ///

The job that was stopped.

#[serde(rename = "job")] @@ -2463,7 +2462,7 @@ pub struct StopRemoteAccessSessionRequest { ///

Represents the response from the server that describes the remote access session when AWS Device Farm stops the session.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopRemoteAccessSessionResult { ///

A container representing the metadata from the service about the remote access session you are stopping.

#[serde(rename = "remoteAccessSession")] @@ -2481,7 +2480,7 @@ pub struct StopRunRequest { ///

Represents the results of your stop run attempt.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopRunResult { ///

The run that was stopped.

#[serde(rename = "run")] @@ -2491,7 +2490,7 @@ pub struct StopRunResult { ///

Represents a collection of one or more tests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Suite { ///

The suite's ARN.

#[serde(rename = "arn")] @@ -2533,7 +2532,7 @@ pub struct Suite { #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option, - ///

The suite's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The suite's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -2561,12 +2560,12 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Represents a condition that is evaluated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Test { ///

The test's ARN.

#[serde(rename = "arn")] @@ -2608,7 +2607,7 @@ pub struct Test { #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option, - ///

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The XCode test type.

  • XCTEST_UI: The XCode UI test type.

+ ///

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -2616,7 +2615,7 @@ pub struct Test { ///

Represents information about free trial device minutes for an AWS account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TrialMinutes { ///

The number of free trial minutes remaining in the account.

#[serde(rename = "remaining")] @@ -2630,7 +2629,7 @@ pub struct TrialMinutes { ///

A collection of one or more problems, grouped by their result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UniqueProblem { ///

A message about the unique problems' result.

#[serde(rename = "message")] @@ -2653,7 +2652,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -2672,7 +2671,7 @@ pub struct UpdateDeviceInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeviceInstanceResult { ///

An object containing information about your device instance.

#[serde(rename = "deviceInstance")] @@ -2683,7 +2682,7 @@ pub struct UpdateDeviceInstanceResult { ///

Represents a request to the update device pool operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDevicePoolRequest { - ///

The Amazon Resourc Name (ARN) of the Device Farm device pool you wish to update.

+ ///

The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.

#[serde(rename = "arn")] pub arn: String, ///

Sets whether the maxDevices parameter applies to your device pool. If you set this parameter to true, the maxDevices parameter does not apply, and Device Farm does not limit the number of devices that it adds to your device pool. In this case, Device Farm adds all available devices that meet the criteria that are specified for the rules parameter.

If you use this parameter in your request, you cannot use the maxDevices parameter in the same request.

@@ -2710,7 +2709,7 @@ pub struct UpdateDevicePoolRequest { ///

Represents the result of an update device pool request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDevicePoolResult { ///

The device pool you just updated.

#[serde(rename = "devicePool")] @@ -2746,7 +2745,7 @@ pub struct UpdateInstanceProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateInstanceProfileResult { ///

An object containing information about your instance profile.

#[serde(rename = "instanceProfile")] @@ -2759,7 +2758,7 @@ pub struct UpdateNetworkProfileRequest { ///

The Amazon Resource Name (ARN) of the project for which you want to update network profile settings.

#[serde(rename = "arn")] pub arn: String, - ///

The descriptoin of the network profile about which you are returning information.

+ ///

The description of the network profile about which you are returning information.

#[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, @@ -2806,7 +2805,7 @@ pub struct UpdateNetworkProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNetworkProfileResult { ///

A list of the available network profiles.

#[serde(rename = "networkProfile")] @@ -2832,7 +2831,7 @@ pub struct UpdateProjectRequest { ///

Represents the result of an update project request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProjectResult { ///

The project you wish to update.

#[serde(rename = "project")] @@ -2860,7 +2859,7 @@ pub struct UpdateUploadRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUploadResult { ///

A test spec uploaded to Device Farm.

#[serde(rename = "upload")] @@ -2892,7 +2891,7 @@ pub struct UpdateVPCEConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateVPCEConfigurationResult { ///

An object containing information about your VPC endpoint configuration.

#[serde(rename = "vpceConfiguration")] @@ -2902,7 +2901,7 @@ pub struct UpdateVPCEConfigurationResult { ///

An app or a set of one or more tests to upload or that have been uploaded.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Upload { ///

The upload's ARN.

#[serde(rename = "arn")] @@ -2936,7 +2935,7 @@ pub struct Upload { #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - ///

The upload's type.

Must be one of the following values:

  • ANDROIDAPP: An Android upload.

  • IOSAPP: An iOS upload.

  • WEBAPP: A web appliction upload.

  • EXTERNALDATA: An external data upload.

  • APPIUMJAVAJUNITTESTPACKAGE: An Appium Java JUnit test package upload.

  • APPIUMJAVATESTNGTESTPACKAGE: An Appium Java TestNG test package upload.

  • APPIUMPYTHONTESTPACKAGE: An Appium Python test package upload.

  • APPIUMNODETESTPACKAGE: An Appium Node.js test package upload.

  • APPIUMRUBYTESTPACKAGE: An Appium Ruby test package upload.

  • APPIUMWEBJAVAJUNITTESTPACKAGE: An Appium Java JUnit test package upload for web apps.

  • APPIUMWEBJAVATESTNGTESTPACKAGE: An Appium Java TestNG test package upload for web apps.

  • APPIUMWEBPYTHONTESTPACKAGE: An Appium Python test package upload for web apps.

  • APPIUMWEBNODETESTPACKAGE: An Appium Node.js test package upload for web apps.

  • APPIUMWEBRUBYTESTPACKAGE: An Appium Ruby test package upload for web apps.

  • CALABASHTESTPACKAGE: A Calabash test package upload.

  • INSTRUMENTATIONTESTPACKAGE: An instrumentation upload.

  • UIAUTOMATIONTESTPACKAGE: A uiautomation test package upload.

  • UIAUTOMATORTESTPACKAGE: A uiautomator test package upload.

  • XCTESTTESTPACKAGE: An XCode test package upload.

  • XCTESTUITESTPACKAGE: An XCode UI test package upload.

  • APPIUMJAVAJUNITTESTSPEC: An Appium Java JUnit test spec upload.

  • APPIUMJAVATESTNGTESTSPEC: An Appium Java TestNG test spec upload.

  • APPIUMPYTHONTESTSPEC: An Appium Python test spec upload.

  • APPIUMNODETESTSPEC: An Appium Node.js test spec upload.

  • APPIUMRUBYTESTSPEC: An Appium Ruby test spec upload.

  • APPIUMWEBJAVAJUNITTESTSPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUMWEBJAVATESTNGTESTSPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUMWEBPYTHONTESTSPEC: An Appium Python test spec upload for a web app.

  • APPIUMWEBNODETESTSPEC: An Appium Node.js test spec upload for a web app.

  • APPIUMWEBRUBYTESTSPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATIONTESTSPEC: An instrumentation test spec upload.

  • XCTESTUITESTSPEC: An XCode UI test spec upload.

+ ///

The upload's type.

Must be one of the following values:

  • ANDROIDAPP: An Android upload.

  • IOSAPP: An iOS upload.

  • WEBAPP: A web application upload.

  • EXTERNALDATA: An external data upload.

  • APPIUMJAVAJUNITTESTPACKAGE: An Appium Java JUnit test package upload.

  • APPIUMJAVATESTNGTESTPACKAGE: An Appium Java TestNG test package upload.

  • APPIUMPYTHONTESTPACKAGE: An Appium Python test package upload.

  • APPIUMNODETESTPACKAGE: An Appium Node.js test package upload.

  • APPIUMRUBYTESTPACKAGE: An Appium Ruby test package upload.

  • APPIUMWEBJAVAJUNITTESTPACKAGE: An Appium Java JUnit test package upload for web apps.

  • APPIUMWEBJAVATESTNGTESTPACKAGE: An Appium Java TestNG test package upload for web apps.

  • APPIUMWEBPYTHONTESTPACKAGE: An Appium Python test package upload for web apps.

  • APPIUMWEBNODETESTPACKAGE: An Appium Node.js test package upload for web apps.

  • APPIUMWEBRUBYTESTPACKAGE: An Appium Ruby test package upload for web apps.

  • CALABASHTESTPACKAGE: A Calabash test package upload.

  • INSTRUMENTATIONTESTPACKAGE: An instrumentation upload.

  • UIAUTOMATIONTESTPACKAGE: A uiautomation test package upload.

  • UIAUTOMATORTESTPACKAGE: A uiautomator test package upload.

  • XCTESTTESTPACKAGE: An Xcode test package upload.

  • XCTESTUITESTPACKAGE: An Xcode UI test package upload.

  • APPIUMJAVAJUNITTESTSPEC: An Appium Java JUnit test spec upload.

  • APPIUMJAVATESTNGTESTSPEC: An Appium Java TestNG test spec upload.

  • APPIUMPYTHONTESTSPEC: An Appium Python test spec upload.

  • APPIUMNODETESTSPEC: An Appium Node.js test spec upload.

  • APPIUMRUBYTESTSPEC: An Appium Ruby test spec upload.

  • APPIUMWEBJAVAJUNITTESTSPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUMWEBJAVATESTNGTESTSPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUMWEBPYTHONTESTSPEC: An Appium Python test spec upload for a web app.

  • APPIUMWEBNODETESTSPEC: An Appium Node.js test spec upload for a web app.

  • APPIUMWEBRUBYTESTSPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATIONTESTSPEC: An instrumentation test spec upload.

  • XCTESTUITESTSPEC: An Xcode UI test spec upload.
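
These response structs only derive `Serialize` under `#[cfg(test)]` or the new `serialize_structs` feature, which is what the repeated `cfg_attr` changes in this diff enable. A minimal sketch, assuming the feature is enabled in `Cargo.toml` (`rusoto_devicefarm = { version = "0.41.0", features = ["serialize_structs"] }`):

```rust
// Hedged sketch: with `serialize_structs` enabled, response structs such as
// `Upload` derive Serialize and can be dumped as JSON for logging or caching.
use rusoto_devicefarm::Upload;

fn upload_to_json(upload: &Upload) -> serde_json::Result<String> {
    // Without the feature (and outside tests) this would not compile,
    // because the Serialize derive sits behind the cfg_attr shown above.
    serde_json::to_string_pretty(upload)
}
```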

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -2948,7 +2947,7 @@ pub struct Upload { ///

Represents an Amazon Virtual Private Cloud (VPC) endpoint configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VPCEConfiguration { ///

The Amazon Resource Name (ARN) of the VPC endpoint configuration.

#[serde(rename = "arn")] @@ -6826,10 +6825,7 @@ impl DeviceFarmClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DeviceFarmClient { - DeviceFarmClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -6843,10 +6839,14 @@ impl DeviceFarmClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DeviceFarmClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DeviceFarmClient { + DeviceFarmClient { client, region } } } diff --git a/rusoto/services/directconnect/Cargo.toml b/rusoto/services/directconnect/Cargo.toml index 584b966bff5..7468271b10a 100644 --- a/rusoto/services/directconnect/Cargo.toml +++ b/rusoto/services/directconnect/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_directconnect" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/directconnect/README.md b/rusoto/services/directconnect/README.md index d2af86072d9..a52759fe0e8 100644 --- a/rusoto/services/directconnect/README.md +++ b/rusoto/services/directconnect/README.md @@ -23,9 +23,16 @@ To use `rusoto_directconnect` in your application, add it as a dependency in you ```toml [dependencies] -rusoto_directconnect = "0.40.0" +rusoto_directconnect = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/directconnect/src/custom/mod.rs b/rusoto/services/directconnect/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/directconnect/src/custom/mod.rs +++ b/rusoto/services/directconnect/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/directconnect/src/generated.rs b/rusoto/services/directconnect/src/generated.rs index 58bb2acdc96..ac2d9745c6e 100644 --- a/rusoto/services/directconnect/src/generated.rs +++ b/rusoto/services/directconnect/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct AcceptDirectConnectGatewayAssociationProposalRequest { ///

The ID of the Direct Connect gateway.

#[serde(rename = "directConnectGatewayId")] pub direct_connect_gateway_id: String, - ///

Overrides the existing Amazon VPC prefixes advertised to the Direct Connect gateway.

+ ///

Overrides the Amazon VPC prefixes advertised to the Direct Connect gateway.

For information about how to set the prefixes, see Allowed Prefixes in the AWS Direct Connect User Guide.

#[serde(rename = "overrideAllowedPrefixesToDirectConnectGateway")] #[serde(skip_serializing_if = "Option::is_none")] pub override_allowed_prefixes_to_direct_connect_gateway: Option>, @@ -42,7 +41,7 @@ pub struct AcceptDirectConnectGatewayAssociationProposalRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcceptDirectConnectGatewayAssociationProposalResult { #[serde(rename = "directConnectGatewayAssociation")] #[serde(skip_serializing_if = "Option::is_none")] @@ -82,6 +81,10 @@ pub struct AllocateHostedConnectionRequest { ///

The ID of the AWS account ID of the customer for the connection.

#[serde(rename = "ownerAccount")] pub owner_account: String, + ///

The tags to assign to the hosted connection.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The dedicated VLAN provisioned to the hosted connection.

#[serde(rename = "vlan")] pub vlan: i64, @@ -127,7 +130,7 @@ pub struct AllocateTransitVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AllocateTransitVirtualInterfaceResult { #[serde(rename = "virtualInterface")] #[serde(skip_serializing_if = "Option::is_none")] @@ -166,7 +169,7 @@ pub struct AssociateVirtualInterfaceRequest { ///

Information about the associated gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociatedGateway { ///

The ID of the associated gateway.

#[serde(rename = "id")] @@ -188,7 +191,7 @@ pub struct AssociatedGateway { ///

Information about a BGP peer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BGPPeer { ///

The address family for the BGP peer.

#[serde(rename = "addressFamily")] @@ -202,7 +205,7 @@ pub struct BGPPeer { #[serde(rename = "asn")] #[serde(skip_serializing_if = "Option::is_none")] pub asn: Option, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.


#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -236,7 +239,7 @@ pub struct ConfirmConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmConnectionResponse { ///

The state of the connection. The following are the possible values:

  • ordering: The initial state of a hosted connection provisioned on an interconnect. The connection stays in the ordering state until the owner of the hosted connection confirms or declines the connection order.

  • requested: The initial state of a standard connection. The connection stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.

  • pending: The connection has been approved and is being initialized.

  • available: The network link is up and the connection is ready for use.

  • down: The network link is down.

  • deleting: The connection is being deleted.

  • deleted: The connection has been deleted.

  • rejected: A hosted connection in the ordering state enters the rejected state if it is deleted by the customer.

  • unknown: The state of the connection is not available.
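
As with the type fields, `connection_state` is a plain `Option<String>`, so the states listed above are matched as string literals. A minimal sketch:

```rust
// Hedged sketch: check whether a confirmed connection is ready to carry
// traffic, per the state list above.
use rusoto_directconnect::ConfirmConnectionResponse;

fn is_available(resp: &ConfirmConnectionResponse) -> bool {
    resp.connection_state.as_ref().map(String::as_str) == Some("available")
}
```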

#[serde(rename = "connectionState")] @@ -260,7 +263,7 @@ pub struct ConfirmPrivateVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmPrivateVirtualInterfaceResponse { ///

The state of the virtual interface. The following are the possible values:

  • confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.

  • verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.

  • pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.

  • available: A virtual interface that is able to forward traffic.

  • down: A virtual interface that is BGP down.

  • deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.

  • deleted: A virtual interface that cannot forward traffic.

  • rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.

  • unknown: The state of the virtual interface is not available.

#[serde(rename = "virtualInterfaceState")] @@ -276,7 +279,7 @@ pub struct ConfirmPublicVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmPublicVirtualInterfaceResponse { ///

The state of the virtual interface. The following are the possible values:

  • confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.

  • verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.

  • pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.

  • available: A virtual interface that is able to forward traffic.

  • down: A virtual interface that is BGP down.

  • deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.

  • deleted: A virtual interface that cannot forward traffic.

  • rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.

  • unknown: The state of the virtual interface is not available.

#[serde(rename = "virtualInterfaceState")] @@ -295,7 +298,7 @@ pub struct ConfirmTransitVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfirmTransitVirtualInterfaceResponse { ///

The state of the virtual interface. The following are the possible values:

  • confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.

  • verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.

  • pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.

  • available: A virtual interface that is able to forward traffic.

  • down: A virtual interface that is BGP down.

  • deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.

  • deleted: A virtual interface that cannot forward traffic.

  • rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.

  • unknown: The state of the virtual interface is not available.

#[serde(rename = "virtualInterfaceState")] @@ -305,7 +308,7 @@ pub struct ConfirmTransitVirtualInterfaceResponse { ///

Information about an AWS Direct Connect connection.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Connection { ///

The Direct Connect endpoint on which the physical connection terminates.

#[serde(rename = "awsDevice")] @@ -363,6 +366,10 @@ pub struct Connection { #[serde(rename = "region")] #[serde(skip_serializing_if = "Option::is_none")] pub region: Option, + ///

Any tags assigned to the connection.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The ID of the VLAN.

#[serde(rename = "vlan")] #[serde(skip_serializing_if = "Option::is_none")] @@ -370,7 +377,7 @@ pub struct Connection { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Connections { ///

The connections.

#[serde(rename = "connections")] @@ -391,7 +398,7 @@ pub struct CreateBGPPeerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBGPPeerResponse { ///

The virtual interface.

#[serde(rename = "virtualInterface")] @@ -414,6 +421,10 @@ pub struct CreateConnectionRequest { ///

The location of the connection.

#[serde(rename = "location")] pub location: String, + ///

The tags to assign to the connection.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -438,7 +449,7 @@ pub struct CreateDirectConnectGatewayAssociationProposalRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectConnectGatewayAssociationProposalResult { ///

Information about the Direct Connect gateway proposal.

#[serde(rename = "directConnectGatewayAssociationProposal")] @@ -449,7 +460,7 @@ pub struct CreateDirectConnectGatewayAssociationProposalResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateDirectConnectGatewayAssociationRequest { - ///

The Amazon VPC prefixes to advertise to the Direct Connect gateway

+ ///

The Amazon VPC prefixes to advertise to the Direct Connect gateway.

For information about how to set the prefixes, see Allowed Prefixes in the AWS Direct Connect User Guide.

#[serde(rename = "addAllowedPrefixesToDirectConnectGateway")] #[serde(skip_serializing_if = "Option::is_none")] pub add_allowed_prefixes_to_direct_connect_gateway: Option>, @@ -467,7 +478,7 @@ pub struct CreateDirectConnectGatewayAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectConnectGatewayAssociationResult { ///

The association to be created.

#[serde(rename = "directConnectGatewayAssociation")] @@ -487,7 +498,7 @@ pub struct CreateDirectConnectGatewayRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectConnectGatewayResult { ///

The Direct Connect gateway.

#[serde(rename = "directConnectGateway")] @@ -510,10 +521,18 @@ pub struct CreateInterconnectRequest { ///

The location of the interconnect.

#[serde(rename = "location")] pub location: String, + ///

The tags to assign to the interconnect.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateLagRequest { + ///

The tags to assign to the child connections of the LAG. Only child connections that are newly created as a result of creating the LAG are assigned the provided tags; the tags are not assigned to an existing connection, supplied via the connectionId parameter, that is migrated into the LAG.

+ #[serde(rename = "childConnectionTags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub child_connection_tags: Option>, ///

The ID of an existing connection to migrate to the LAG.

#[serde(rename = "connectionId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -530,6 +549,10 @@ pub struct CreateLagRequest { ///

The number of physical connections initially provisioned and bundled by the LAG.

#[serde(rename = "numberOfConnections")] pub number_of_connections: i64, + ///

The tags to assign to the link aggregation group (LAG).

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -563,7 +586,7 @@ pub struct CreateTransitVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTransitVirtualInterfaceResult { #[serde(rename = "virtualInterface")] #[serde(skip_serializing_if = "Option::is_none")] @@ -591,7 +614,7 @@ pub struct DeleteBGPPeerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBGPPeerResponse { ///

The virtual interface.

#[serde(rename = "virtualInterface")] @@ -614,7 +637,7 @@ pub struct DeleteDirectConnectGatewayAssociationProposalRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectConnectGatewayAssociationProposalResult { ///

The ID of the associated gateway.

#[serde(rename = "directConnectGatewayAssociationProposal")] @@ -640,7 +663,7 @@ pub struct DeleteDirectConnectGatewayAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectConnectGatewayAssociationResult { ///

Information about the deleted association.

#[serde(rename = "directConnectGatewayAssociation")] @@ -656,7 +679,7 @@ pub struct DeleteDirectConnectGatewayRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectConnectGatewayResult { ///

The Direct Connect gateway.

#[serde(rename = "directConnectGateway")] @@ -672,7 +695,7 @@ pub struct DeleteInterconnectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInterconnectResponse { ///

The state of the interconnect. The following are the possible values:

  • requested: The initial state of an interconnect. The interconnect stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.

  • pending: The interconnect is approved, and is being initialized.

  • available: The network link is up, and the interconnect is ready for use.

  • down: The network link is down.

  • deleting: The interconnect is being deleted.

  • deleted: The interconnect is deleted.

  • unknown: The state of the interconnect is not available.

#[serde(rename = "interconnectState")] @@ -695,7 +718,7 @@ pub struct DeleteVirtualInterfaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteVirtualInterfaceResponse { ///

The state of the virtual interface. The following are the possible values:

  • confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.

  • verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.

  • pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.

  • available: A virtual interface that is able to forward traffic.

  • down: A virtual interface that is BGP down.

  • deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.

  • deleted: A virtual interface that cannot forward traffic.

  • rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.

  • unknown: The state of the virtual interface is not available.

#[serde(rename = "virtualInterfaceState")] @@ -719,7 +742,7 @@ pub struct DescribeConnectionLoaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConnectionLoaResponse { ///

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA).

#[serde(rename = "loa")] @@ -767,7 +790,7 @@ pub struct DescribeDirectConnectGatewayAssociationProposalsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectConnectGatewayAssociationProposalsResult { ///

Describes the Direct Connect gateway association proposals.

#[serde(rename = "directConnectGatewayAssociationProposals")] @@ -809,7 +832,7 @@ pub struct DescribeDirectConnectGatewayAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectConnectGatewayAssociationsResult { ///

Information about the associations.

#[serde(rename = "directConnectGatewayAssociations")] @@ -842,7 +865,7 @@ pub struct DescribeDirectConnectGatewayAttachmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectConnectGatewayAttachmentsResult { ///

The attachments.

#[serde(rename = "directConnectGatewayAttachments")] @@ -871,7 +894,7 @@ pub struct DescribeDirectConnectGatewaysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectConnectGatewaysResult { ///

The Direct Connect gateways.

#[serde(rename = "directConnectGateways")] @@ -906,7 +929,7 @@ pub struct DescribeInterconnectLoaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInterconnectLoaResponse { ///

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA).

#[serde(rename = "loa")] @@ -953,7 +976,7 @@ pub struct DescribeTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagsResponse { ///

Information about the tags.

#[serde(rename = "resourceTags")] @@ -975,7 +998,7 @@ pub struct DescribeVirtualInterfacesRequest { ///

Information about a Direct Connect gateway, which enables you to connect virtual interfaces and virtual private gateway or transit gateways.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectConnectGateway { ///

The autonomous system number (ASN) for the Amazon side of the connection.

#[serde(rename = "amazonSideAsn")] @@ -1005,7 +1028,7 @@ pub struct DirectConnectGateway { ///

Information about an association between a Direct Connect gateway and a virtual private gateway or transit gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectConnectGatewayAssociation { ///

The Amazon VPC prefixes to advertise to the Direct Connect gateway.

#[serde(rename = "allowedPrefixesToDirectConnectGateway")] @@ -1051,7 +1074,7 @@ pub struct DirectConnectGatewayAssociation { ///

Information about the proposal request to attach a virtual private gateway to a Direct Connect gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectConnectGatewayAssociationProposal { ///

Information about the associated gateway.

#[serde(rename = "associatedGateway")] @@ -1085,13 +1108,13 @@ pub struct DirectConnectGatewayAssociationProposal { ///

Information about an attachment between a Direct Connect gateway and a virtual interface.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectConnectGatewayAttachment { ///

The state of the attachment. The following are the possible values:

  • attaching: The initial state after a virtual interface is created using the Direct Connect gateway.

  • attached: The Direct Connect gateway and virtual interface are attached and ready to pass traffic.

  • detaching: The initial state after calling DeleteVirtualInterface.

  • detached: The virtual interface is detached from the Direct Connect gateway. Traffic flow between the Direct Connect gateway and virtual interface is stopped.

#[serde(rename = "attachmentState")] #[serde(skip_serializing_if = "Option::is_none")] pub attachment_state: Option, - ///

The type of attachment.

+ ///

The interface type.

#[serde(rename = "attachmentType")] #[serde(skip_serializing_if = "Option::is_none")] pub attachment_type: Option, @@ -1129,7 +1152,7 @@ pub struct DisassociateConnectionFromLagRequest { ///

Information about an interconnect.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Interconnect { ///

The Direct Connect endpoint on which the physical connection terminates.

#[serde(rename = "awsDevice")] @@ -1179,10 +1202,14 @@ pub struct Interconnect { #[serde(rename = "region")] #[serde(skip_serializing_if = "Option::is_none")] pub region: Option, + ///

Any tags assigned to the interconnect.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Interconnects { ///

The interconnects.

#[serde(rename = "interconnects")] @@ -1192,7 +1219,7 @@ pub struct Interconnects { ///

Information about a link aggregation group (LAG).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Lag { ///

Indicates whether the LAG can host other connections.

#[serde(rename = "allowsHostedConnections")] @@ -1254,10 +1281,14 @@ pub struct Lag { #[serde(rename = "region")] #[serde(skip_serializing_if = "Option::is_none")] pub region: Option, + ///

Any tags assigned to the link aggregation group (LAG).

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Lags { ///

The LAGs.

#[serde(rename = "lags")] @@ -1267,7 +1298,7 @@ pub struct Lags { ///

Information about a Letter of Authorization - Connecting Facility Assignment (LOA-CFA) for a connection.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Loa { ///

The binary contents of the LOA-CFA document.

#[serde(rename = "loaContent")] @@ -1286,7 +1317,7 @@ pub struct Loa { ///

Information about an AWS Direct Connect location.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Location { ///

The available port speeds for the location.

#[serde(rename = "availablePortSpeeds")] @@ -1307,7 +1338,7 @@ pub struct Location { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Locations { ///

The locations.

#[serde(rename = "locations")] @@ -1330,7 +1361,7 @@ pub struct NewBGPPeer { #[serde(rename = "asn")] #[serde(skip_serializing_if = "Option::is_none")] pub asn: Option, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1354,7 +1385,7 @@ pub struct NewPrivateVirtualInterface { ///

The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

#[serde(rename = "asn")] pub asn: i64, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1370,6 +1401,10 @@ pub struct NewPrivateVirtualInterface { #[serde(rename = "mtu")] #[serde(skip_serializing_if = "Option::is_none")] pub mtu: Option, + ///

Any tags assigned to the private virtual interface.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The ID of the virtual private gateway.

#[serde(rename = "virtualGatewayId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1396,7 +1431,7 @@ pub struct NewPrivateVirtualInterfaceAllocation { ///

The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

#[serde(rename = "asn")] pub asn: i64, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1408,6 +1443,10 @@ pub struct NewPrivateVirtualInterfaceAllocation { #[serde(rename = "mtu")] #[serde(skip_serializing_if = "Option::is_none")] pub mtu: Option, + ///

Any tags assigned to the private virtual interface to be provisioned on a connection.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The name of the virtual interface assigned by the customer network.

#[serde(rename = "virtualInterfaceName")] pub virtual_interface_name: String, @@ -1430,7 +1469,7 @@ pub struct NewPublicVirtualInterface { ///

The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

#[serde(rename = "asn")] pub asn: i64, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1442,6 +1481,10 @@ pub struct NewPublicVirtualInterface { #[serde(rename = "routeFilterPrefixes")] #[serde(skip_serializing_if = "Option::is_none")] pub route_filter_prefixes: Option>, + ///

Any tags assigned to the public virtual interface.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The name of the virtual interface assigned by the customer network.

#[serde(rename = "virtualInterfaceName")] pub virtual_interface_name: String, @@ -1464,7 +1507,7 @@ pub struct NewPublicVirtualInterfaceAllocation { ///

The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

#[serde(rename = "asn")] pub asn: i64, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1476,6 +1519,10 @@ pub struct NewPublicVirtualInterfaceAllocation { #[serde(rename = "routeFilterPrefixes")] #[serde(skip_serializing_if = "Option::is_none")] pub route_filter_prefixes: Option>, + ///

Any tags assigned to the public virtual interface to be provisioned on a connection.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The name of the virtual interface assigned by the customer network.

#[serde(rename = "virtualInterfaceName")] pub virtual_interface_name: String, @@ -1484,7 +1531,7 @@ pub struct NewPublicVirtualInterfaceAllocation { pub vlan: i64, } -///

Information about a transit virtual interface.

+///

Information about the transit virtual interface.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct NewTransitVirtualInterface { ///

The address family for the BGP peer.

@@ -1511,10 +1558,14 @@ pub struct NewTransitVirtualInterface { #[serde(rename = "directConnectGatewayId")] #[serde(skip_serializing_if = "Option::is_none")] pub direct_connect_gateway_id: Option, - ///

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

+ ///

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.
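
A minimal sketch of building this request struct with the jumbo-frame MTU now documented above; the field values are placeholders, and the struct's `Default` impl fills the rest:

```rust
// Hedged sketch: request a transit virtual interface with an 8500-byte MTU.
use rusoto_directconnect::NewTransitVirtualInterface;

fn jumbo_transit_vif() -> NewTransitVirtualInterface {
    NewTransitVirtualInterface {
        mtu: Some(8500), // 1500 is the default; 8500 enables jumbo frames
        vlan: Some(101),
        virtual_interface_name: Some("transit-vif-1".to_owned()),
        ..Default::default()
    }
}
```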

#[serde(rename = "mtu")] #[serde(skip_serializing_if = "Option::is_none")] pub mtu: Option, + ///

Any tags assigned to the transit virtual interface.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The name of the virtual interface assigned by the customer network.

#[serde(rename = "virtualInterfaceName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1525,7 +1576,7 @@ pub struct NewTransitVirtualInterface { pub vlan: Option, } -///

Information about a transit virtual interface to be provisioned on a connection.

+///

Information about a transit virtual interface.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct NewTransitVirtualInterfaceAllocation { ///

The address family for the BGP peer.

@@ -1548,10 +1599,14 @@ pub struct NewTransitVirtualInterfaceAllocation { #[serde(rename = "customerAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub customer_address: Option, - ///

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

+ ///

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

#[serde(rename = "mtu")] #[serde(skip_serializing_if = "Option::is_none")] pub mtu: Option, + ///

Any tags assigned to the transit virtual interface.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The name of the virtual interface assigned by the customer network.

#[serde(rename = "virtualInterfaceName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1564,7 +1619,7 @@ pub struct NewTransitVirtualInterfaceAllocation { ///

Information about a tag associated with an AWS Direct Connect resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceTag { ///

The Amazon Resource Name (ARN) of the resource.

#[serde(rename = "resourceArn")] @@ -1602,13 +1657,13 @@ pub struct TagResourceRequest { ///

The Amazon Resource Name (ARN) of the resource.

#[serde(rename = "resourceArn")] pub resource_arn: String, - ///

The tags to add.

+ ///

The tags to assign.
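
A minimal sketch of assembling this request; the ARN and tag values are placeholders, and `Tag`'s `value` is assumed optional as in the AWS API:

```rust
// Hedged sketch: tag a Direct Connect resource.
use rusoto_directconnect::{Tag, TagResourceRequest};

fn tag_request(resource_arn: String) -> TagResourceRequest {
    TagResourceRequest {
        resource_arn,
        tags: vec![Tag {
            key: "environment".to_owned(),
            value: Some("staging".to_owned()),
        }],
    }
}
```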

#[serde(rename = "tags")] pub tags: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1622,7 +1677,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1642,7 +1697,7 @@ pub struct UpdateDirectConnectGatewayAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDirectConnectGatewayAssociationResult { #[serde(rename = "directConnectGatewayAssociation")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1677,7 +1732,7 @@ pub struct UpdateVirtualInterfaceAttributesRequest { ///

Information about a virtual private gateway for a private virtual interface.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VirtualGateway { ///

The ID of the virtual private gateway.

#[serde(rename = "virtualGatewayId")] @@ -1690,7 +1745,7 @@ pub struct VirtualGateway { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VirtualGateways { ///

The virtual private gateways.

#[serde(rename = "virtualGateways")] @@ -1700,7 +1755,7 @@ pub struct VirtualGateways { ///

Information about a virtual interface.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VirtualInterface { ///

The address family for the BGP peer.

#[serde(rename = "addressFamily")] @@ -1718,7 +1773,7 @@ pub struct VirtualInterface { #[serde(rename = "asn")] #[serde(skip_serializing_if = "Option::is_none")] pub asn: Option, - ///

The authentication key for BGP configuration.

+ ///

The authentication key for BGP configuration. This string has a minimum length of 6 characters and a maximum length of 80 characters.

#[serde(rename = "authKey")] #[serde(skip_serializing_if = "Option::is_none")] pub auth_key: Option, @@ -1770,6 +1825,10 @@ pub struct VirtualInterface { #[serde(rename = "routeFilterPrefixes")] #[serde(skip_serializing_if = "Option::is_none")] pub route_filter_prefixes: Option>, + ///

Any tags assigned to the virtual interface.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The ID of the virtual private gateway. Applies only to private virtual interfaces.

#[serde(rename = "virtualGatewayId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1797,7 +1856,7 @@ pub struct VirtualInterface { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VirtualInterfaces { ///

The virtual interfaces.

#[serde(rename = "virtualInterfaces")] @@ -1910,6 +1969,10 @@ pub enum AllocateHostedConnectionError { DirectConnectClient(String), ///

A server-side error occurred.

DirectConnectServer(String), + ///

A tag key was specified more than once.

+ DuplicateTagKeys(String), + ///

You have reached the limit on the number of tags that can be assigned.

+ TooManyTags(String), } impl AllocateHostedConnectionError { @@ -1926,6 +1989,16 @@ impl AllocateHostedConnectionError { AllocateHostedConnectionError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service(AllocateHostedConnectionError::DuplicateTagKeys( + err.msg, + )) + } + "TooManyTagsException" => { + return RusotoError::Service(AllocateHostedConnectionError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -1943,6 +2016,8 @@ impl Error for AllocateHostedConnectionError { match *self { AllocateHostedConnectionError::DirectConnectClient(ref cause) => cause, AllocateHostedConnectionError::DirectConnectServer(ref cause) => cause, + AllocateHostedConnectionError::DuplicateTagKeys(ref cause) => cause, + AllocateHostedConnectionError::TooManyTags(ref cause) => cause, } } } @@ -1953,6 +2028,10 @@ pub enum AllocatePrivateVirtualInterfaceError { DirectConnectClient(String), ///

A server-side error occurred.

DirectConnectServer(String), + ///

A tag key was specified more than once.

+ DuplicateTagKeys(String), + ///

You have reached the limit on the number of tags that can be assigned.

+ TooManyTags(String), } impl AllocatePrivateVirtualInterfaceError { @@ -1971,6 +2050,16 @@ impl AllocatePrivateVirtualInterfaceError { AllocatePrivateVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + AllocatePrivateVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(AllocatePrivateVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -1988,6 +2077,8 @@ impl Error for AllocatePrivateVirtualInterfaceError { match *self { AllocatePrivateVirtualInterfaceError::DirectConnectClient(ref cause) => cause, AllocatePrivateVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + AllocatePrivateVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + AllocatePrivateVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -1998,6 +2089,10 @@ pub enum AllocatePublicVirtualInterfaceError { DirectConnectClient(String), ///

A server-side error occurred.

DirectConnectServer(String), + ///

A tag key was specified more than once.

+ DuplicateTagKeys(String), + ///

You have reached the limit on the number of tags that can be assigned.

+ TooManyTags(String), } impl AllocatePublicVirtualInterfaceError { @@ -2016,6 +2111,16 @@ impl AllocatePublicVirtualInterfaceError { AllocatePublicVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + AllocatePublicVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(AllocatePublicVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2033,6 +2138,8 @@ impl Error for AllocatePublicVirtualInterfaceError { match *self { AllocatePublicVirtualInterfaceError::DirectConnectClient(ref cause) => cause, AllocatePublicVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + AllocatePublicVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + AllocatePublicVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -2043,6 +2150,10 @@ pub enum AllocateTransitVirtualInterfaceError { DirectConnectClient(String), ///

A server-side error occurred.

DirectConnectServer(String), + ///

A tag key was specified more than once.

+ DuplicateTagKeys(String), + ///

You have reached the limit on the number of tags that can be assigned.

+ TooManyTags(String), } impl AllocateTransitVirtualInterfaceError { @@ -2061,6 +2172,16 @@ impl AllocateTransitVirtualInterfaceError { AllocateTransitVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + AllocateTransitVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(AllocateTransitVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2078,6 +2199,8 @@ impl Error for AllocateTransitVirtualInterfaceError { match *self { AllocateTransitVirtualInterfaceError::DirectConnectClient(ref cause) => cause, AllocateTransitVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + AllocateTransitVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + AllocateTransitVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -2436,6 +2559,10 @@ pub enum CreateConnectionError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
+ TooManyTags(String), } impl CreateConnectionError { @@ -2452,6 +2579,12 @@ impl CreateConnectionError { err.msg, )) } + "DuplicateTagKeysException" => { + return RusotoError::Service(CreateConnectionError::DuplicateTagKeys(err.msg)) + } + "TooManyTagsException" => { + return RusotoError::Service(CreateConnectionError::TooManyTags(err.msg)) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2469,6 +2602,8 @@ impl Error for CreateConnectionError { match *self { CreateConnectionError::DirectConnectClient(ref cause) => cause, CreateConnectionError::DirectConnectServer(ref cause) => cause, + CreateConnectionError::DuplicateTagKeys(ref cause) => cause, + CreateConnectionError::TooManyTags(ref cause) => cause, } } } @@ -2622,6 +2757,10 @@ pub enum CreateInterconnectError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
+ TooManyTags(String), } impl CreateInterconnectError { @@ -2638,6 +2777,12 @@ impl CreateInterconnectError { err.msg, )) } + "DuplicateTagKeysException" => { + return RusotoError::Service(CreateInterconnectError::DuplicateTagKeys(err.msg)) + } + "TooManyTagsException" => { + return RusotoError::Service(CreateInterconnectError::TooManyTags(err.msg)) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2655,6 +2800,8 @@ impl Error for CreateInterconnectError { match *self { CreateInterconnectError::DirectConnectClient(ref cause) => cause, CreateInterconnectError::DirectConnectServer(ref cause) => cause, + CreateInterconnectError::DuplicateTagKeys(ref cause) => cause, + CreateInterconnectError::TooManyTags(ref cause) => cause, } } } @@ -2665,6 +2812,10 @@ pub enum CreateLagError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
+ TooManyTags(String), } impl CreateLagError { @@ -2677,6 +2828,12 @@ impl CreateLagError { "DirectConnectServerException" => { return RusotoError::Service(CreateLagError::DirectConnectServer(err.msg)) } + "DuplicateTagKeysException" => { + return RusotoError::Service(CreateLagError::DuplicateTagKeys(err.msg)) + } + "TooManyTagsException" => { + return RusotoError::Service(CreateLagError::TooManyTags(err.msg)) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2694,6 +2851,8 @@ impl Error for CreateLagError { match *self { CreateLagError::DirectConnectClient(ref cause) => cause, CreateLagError::DirectConnectServer(ref cause) => cause, + CreateLagError::DuplicateTagKeys(ref cause) => cause, + CreateLagError::TooManyTags(ref cause) => cause, } } } @@ -2704,6 +2863,10 @@ pub enum CreatePrivateVirtualInterfaceError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
+ TooManyTags(String), } impl CreatePrivateVirtualInterfaceError { @@ -2722,6 +2885,16 @@ impl CreatePrivateVirtualInterfaceError { CreatePrivateVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + CreatePrivateVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(CreatePrivateVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2739,6 +2912,8 @@ impl Error for CreatePrivateVirtualInterfaceError { match *self { CreatePrivateVirtualInterfaceError::DirectConnectClient(ref cause) => cause, CreatePrivateVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + CreatePrivateVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + CreatePrivateVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -2749,6 +2924,10 @@ pub enum CreatePublicVirtualInterfaceError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
+ TooManyTags(String), } impl CreatePublicVirtualInterfaceError { @@ -2767,6 +2946,16 @@ impl CreatePublicVirtualInterfaceError { CreatePublicVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + CreatePublicVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(CreatePublicVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2784,6 +2973,8 @@ impl Error for CreatePublicVirtualInterfaceError { match *self { CreatePublicVirtualInterfaceError::DirectConnectClient(ref cause) => cause, CreatePublicVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + CreatePublicVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + CreatePublicVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -2794,6 +2985,10 @@ pub enum CreateTransitVirtualInterfaceError { DirectConnectClient(String), ///
A server-side error occurred.
DirectConnectServer(String), + /// A tag key was specified more than once.
+ DuplicateTagKeys(String), + /// You have reached the limit on the number of tags that can be assigned.
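Every Allocate*/Create* error enum in this hunk gains the same pair of tagging variants, `DuplicateTagKeys` and `TooManyTags`, mapped from the `DuplicateTagKeysException` and `TooManyTagsException` service errors. A minimal sketch of matching them at a call site, assuming the 0.41 `CreateConnectionRequest` shape (the field values, and the `tags` field itself, are illustrative assumptions):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_directconnect::{
    CreateConnectionError, CreateConnectionRequest, DirectConnect, DirectConnectClient, Tag,
};

fn main() {
    let client = DirectConnectClient::new(Region::UsEast1);
    let request = CreateConnectionRequest {
        bandwidth: "1Gbps".to_owned(),
        connection_name: "example-connection".to_owned(),
        location: "EqDC2".to_owned(), // illustrative Direct Connect location code
        tags: Some(vec![Tag {
            key: "env".to_owned(),
            value: Some("dev".to_owned()),
        }]),
        ..Default::default()
    };
    // `.sync()` blocks on the RusotoFuture returned by the client.
    match client.create_connection(request).sync() {
        Ok(connection) => println!("created: {:?}", connection),
        Err(RusotoError::Service(CreateConnectionError::DuplicateTagKeys(msg))) => {
            eprintln!("a tag key was repeated: {}", msg)
        }
        Err(RusotoError::Service(CreateConnectionError::TooManyTags(msg))) => {
            eprintln!("tag limit reached: {}", msg)
        }
        Err(other) => eprintln!("request failed: {}", other),
    }
}
```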
+ TooManyTags(String), } impl CreateTransitVirtualInterfaceError { @@ -2812,6 +3007,16 @@ impl CreateTransitVirtualInterfaceError { CreateTransitVirtualInterfaceError::DirectConnectServer(err.msg), ) } + "DuplicateTagKeysException" => { + return RusotoError::Service( + CreateTransitVirtualInterfaceError::DuplicateTagKeys(err.msg), + ) + } + "TooManyTagsException" => { + return RusotoError::Service(CreateTransitVirtualInterfaceError::TooManyTags( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2829,6 +3034,8 @@ impl Error for CreateTransitVirtualInterfaceError { match *self { CreateTransitVirtualInterfaceError::DirectConnectClient(ref cause) => cause, CreateTransitVirtualInterfaceError::DirectConnectServer(ref cause) => cause, + CreateTransitVirtualInterfaceError::DuplicateTagKeys(ref cause) => cause, + CreateTransitVirtualInterfaceError::TooManyTags(ref cause) => cause, } } } @@ -4280,7 +4487,7 @@ pub trait DirectConnect { input: CreatePublicVirtualInterfaceRequest, ) -> RusotoFuture; - ///
Creates a transit virtual interface. A transit virtual interface is a VLAN that transports traffic from a Direct Connect gateway to one or more transit gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.
+ /// Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway. If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your transit gateway and Direct Connect gateway, the association request fails.
fn create_transit_virtual_interface( &self, input: CreateTransitVirtualInterfaceRequest, @@ -4480,10 +4687,7 @@ impl DirectConnectClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DirectConnectClient { - DirectConnectClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4497,10 +4701,14 @@ impl DirectConnectClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DirectConnectClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DirectConnectClient { + DirectConnectClient { client, region } } } @@ -5139,7 +5347,7 @@ impl DirectConnect for DirectConnectClient { }) } - ///
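The constructor change in the hunk above routes both `new` and `new_with` through a new public `new_with_client`, so a caller can hand an existing `rusoto_core::Client` (dispatcher plus credentials provider) to several service clients instead of letting each build its own. A minimal sketch:

```rust
use rusoto_core::{Client, Region};
use rusoto_directconnect::DirectConnectClient;

fn main() {
    // One shared HTTP client and credentials provider behind both service clients.
    let shared = Client::shared();
    let dx_east = DirectConnectClient::new_with_client(shared.clone(), Region::UsEast1);
    let dx_west = DirectConnectClient::new_with_client(shared, Region::UsWest2);
    let _ = (dx_east, dx_west);
}
```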
Creates a transit virtual interface. A transit virtual interface is a VLAN that transports traffic from a Direct Connect gateway to one or more transit gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.
+ /// Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway. If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your transit gateway and Direct Connect gateway, the association request fails.
fn create_transit_virtual_interface( &self, input: CreateTransitVirtualInterfaceRequest, diff --git a/rusoto/services/discovery/Cargo.toml b/rusoto/services/discovery/Cargo.toml index c57e9a97f2c..cf0e1ab79b6 100644 --- a/rusoto/services/discovery/Cargo.toml +++ b/rusoto/services/discovery/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_discovery" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/discovery/README.md b/rusoto/services/discovery/README.md index 44efed8cc7e..7cb3e23073a 100644 --- a/rusoto/services/discovery/README.md +++ b/rusoto/services/discovery/README.md @@ -23,9 +23,16 @@ To use `rusoto_discovery` in your application, add it as a dependency in your `C ```toml [dependencies] -rusoto_discovery = "0.40.0" +rusoto_discovery = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/discovery/src/custom/mod.rs b/rusoto/services/discovery/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/discovery/src/custom/mod.rs +++ b/rusoto/services/discovery/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/discovery/src/generated.rs b/rusoto/services/discovery/src/generated.rs index 9cd36d5d08b..e3a5e48e90a 100644 --- a/rusoto/services/discovery/src/generated.rs +++ b/rusoto/services/discovery/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///
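The `serialize_structs` feature added in the discovery Cargo.toml and README above widens the existing test-only `derive(Serialize)` to ordinary builds, so output structs can be written back out (for caching, fixtures, logging). A sketch assuming the crate is built with `features = ["serialize_structs"]` and `serde_json` is available in the consuming crate:

```rust
use rusoto_discovery::DescribeAgentsResponse;

fn main() {
    // Without the feature this struct only implements Deserialize
    // (Serialize is gated behind #[cfg(test)]); with it, serialization works.
    let response = DescribeAgentsResponse::default();
    let json = serde_json::to_string(&response).expect("serialize response");
    println!("{}", json);
}
```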

Information about agents or connectors that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation, and whether the agent/connector configuration was updated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AgentConfigurationStatus { ///

The agent/connector ID.

#[serde(rename = "agentId")] @@ -44,7 +43,7 @@ pub struct AgentConfigurationStatus { ///

Information about agents or connectors associated with the user’s AWS account. Information includes agent or connector IDs, IP addresses, media access control (MAC) addresses, agent or connector health, hostname where the agent or connector resides, and agent version for each agent.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AgentInfo { ///

The agent or connector ID.

#[serde(rename = "agentId")] @@ -90,7 +89,7 @@ pub struct AgentInfo { ///

Network details about the host where the agent/connector resides.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AgentNetworkInfo { ///

The IP address for the host where the agent/connector resides.

#[serde(rename = "ipAddress")] @@ -113,12 +112,12 @@ pub struct AssociateConfigurationItemsToApplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateConfigurationItemsToApplicationResponse {} ///

Error messages returned for each import task that you deleted as a response for this command.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiscoveryBatchDeleteImportDataError { ///

The type of error that occurred for a specific import task.

#[serde(rename = "errorCode")] @@ -142,7 +141,7 @@ pub struct BatchDeleteImportDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteImportDataResponse { ///

Error messages returned for each import task that you deleted as a response for this command.

#[serde(rename = "errors")] @@ -152,7 +151,7 @@ pub struct BatchDeleteImportDataResponse { ///

Tags for a configuration item. Tags are metadata that help you categorize IT assets.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfigurationTag { ///

The configuration ID for the item to tag. You can specify a list of keys and values.

#[serde(rename = "configurationId")] @@ -178,7 +177,7 @@ pub struct ConfigurationTag { ///

A list of continuous export descriptions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContinuousExportDescription { ///

The type of data collector used to gather this data (currently only offered for AGENT).

#[serde(rename = "dataSource")] @@ -226,7 +225,7 @@ pub struct CreateApplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApplicationResponse { ///

Configuration ID of an application to be created.

#[serde(rename = "configurationId")] @@ -245,12 +244,12 @@ pub struct CreateTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTagsResponse {} ///

Inventory data for installed discovery agents.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CustomerAgentInfo { ///

Number of active discovery agents.

#[serde(rename = "activeAgents")] @@ -277,7 +276,7 @@ pub struct CustomerAgentInfo { ///

Inventory data for installed discovery connectors.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CustomerConnectorInfo { ///

Number of active discovery connectors.

#[serde(rename = "activeConnectors")] @@ -310,7 +309,7 @@ pub struct DeleteApplicationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -325,7 +324,7 @@ pub struct DeleteTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTagsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -349,7 +348,7 @@ pub struct DescribeAgentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAgentsResponse { ///

Lists agents or the Connector by ID or lists all agents/Connectors associated with your user account if you did not specify an agent/Connector ID. The output includes agent/Connector IDs, IP addresses, media access control (MAC) addresses, agent/Connector health, host name where the agent/Connector resides, and the version number of each agent/Connector.

#[serde(rename = "agentsInfo")] @@ -369,7 +368,7 @@ pub struct DescribeConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConfigurationsResponse { ///

A key in the response map. The value is an array of data.

#[serde(rename = "configurations")] @@ -394,7 +393,7 @@ pub struct DescribeContinuousExportsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeContinuousExportsResponse { ///

A list of continuous export descriptions.

#[serde(rename = "descriptions")] @@ -423,7 +422,7 @@ pub struct DescribeExportConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeExportConfigurationsResponse { ///

#[serde(rename = "exportsInfo")] @@ -456,7 +455,7 @@ pub struct DescribeExportTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeExportTasksResponse { ///

Contains one or more sets of export request details. When the status of a request is SUCCEEDED, the response includes a URL for an Amazon S3 bucket where you can view the data in a CSV file.

#[serde(rename = "exportsInfo")] @@ -485,7 +484,7 @@ pub struct DescribeImportTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeImportTasksResponse { ///

The token to request the next page of results.

#[serde(rename = "nextToken")] @@ -514,7 +513,7 @@ pub struct DescribeTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagsResponse { ///

The call returns a token. Use this token to get the next set of results.

#[serde(rename = "nextToken")] @@ -537,11 +536,11 @@ pub struct DisassociateConfigurationItemsFromApplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateConfigurationItemsFromApplicationResponse {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportConfigurationsResponse { ///

A unique identifier that you can use to query the export status.

#[serde(rename = "exportId")] @@ -565,7 +564,7 @@ pub struct ExportFilter { ///

Information regarding the export status of discovered data. The value is an array of objects.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportInfo { ///

A URL for an Amazon S3 bucket where you can review the exported data. The URL is displayed only if the export succeeded.

#[serde(rename = "configurationsDownloadUrl")] @@ -615,7 +614,7 @@ pub struct Filter { pub struct GetDiscoverySummaryRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDiscoverySummaryResponse { ///

Details about discovered agents, including agent status and health.

#[serde(rename = "agentSummary")] @@ -645,7 +644,7 @@ pub struct GetDiscoverySummaryResponse { ///

An array of information related to the import task request that includes status information, times, IDs, the Amazon S3 Object URL for the import file, and more.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportTask { ///

The total number of application records in the import file that failed to be imported.

#[serde(rename = "applicationImportFailure")] @@ -738,7 +737,7 @@ pub struct ListConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConfigurationsResponse { ///

Returns configuration details, including the configuration ID, attribute names, and attribute values.

#[serde(rename = "configurations")] @@ -774,7 +773,7 @@ pub struct ListServerNeighborsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListServerNeighborsResponse { ///

Count of distinct servers that are one hop away from the given server.

#[serde(rename = "knownDependencyCount")] @@ -791,7 +790,7 @@ pub struct ListServerNeighborsResponse { ///

Details about neighboring servers.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NeighborConnectionDetail { ///

The number of open network connections with the neighboring server.

#[serde(rename = "connectionsCount")] @@ -828,7 +827,7 @@ pub struct OrderByElement { pub struct StartContinuousExportRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartContinuousExportResponse { ///

The type of data collector used to gather this data (currently only offered for AGENT).

#[serde(rename = "dataSource")] @@ -860,7 +859,7 @@ pub struct StartDataCollectionByAgentIdsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartDataCollectionByAgentIdsResponse { ///

Information about agents or the connector that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

#[serde(rename = "agentsConfigurationStatus")] @@ -889,7 +888,7 @@ pub struct StartExportTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartExportTaskResponse { ///

A unique identifier used to query the status of an export request.

#[serde(rename = "exportId")] @@ -912,7 +911,7 @@ pub struct StartImportTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartImportTaskResponse { ///

An array of information related to the import task request including status information, times, IDs, the Amazon S3 Object URL for the import file, and more.

#[serde(rename = "task")] @@ -928,7 +927,7 @@ pub struct StopContinuousExportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopContinuousExportResponse { ///

Timestamp that represents when this continuous export started collecting data.

#[serde(rename = "startTime")] @@ -948,7 +947,7 @@ pub struct StopDataCollectionByAgentIdsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopDataCollectionByAgentIdsResponse { ///

Information about the agents or connector that were instructed to stop collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

#[serde(rename = "agentsConfigurationStatus")] @@ -994,7 +993,7 @@ pub struct UpdateApplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateApplicationResponse {} /// Errors returned by AssociateConfigurationItemsToApplication @@ -2709,10 +2708,7 @@ impl DiscoveryClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DiscoveryClient { - DiscoveryClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2726,10 +2722,14 @@ impl DiscoveryClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DiscoveryClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DiscoveryClient { + DiscoveryClient { client, region } } } diff --git a/rusoto/services/dms/Cargo.toml b/rusoto/services/dms/Cargo.toml index 4c125018029..1be564bbb84 100644 --- a/rusoto/services/dms/Cargo.toml +++ b/rusoto/services/dms/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_dms" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/dms/README.md b/rusoto/services/dms/README.md index 3a072b24fae..12b58f64761 100644 --- a/rusoto/services/dms/README.md +++ b/rusoto/services/dms/README.md @@ -23,9 +23,16 @@ To use `rusoto_dms` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_dms = "0.40.0" +rusoto_dms = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/dms/src/custom/mod.rs b/rusoto/services/dms/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/dms/src/custom/mod.rs +++ b/rusoto/services/dms/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/dms/src/generated.rs b/rusoto/services/dms/src/generated.rs index ecb515b5d3c..c77b243c84f 100644 --- a/rusoto/services/dms/src/generated.rs +++ b/rusoto/services/dms/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Describes a quota for an AWS account, for example, the number of replication instances allowed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccountQuota { ///

The name of the AWS DMS quota for this AWS account.

#[serde(rename = "AccountQuotaName")] @@ -42,20 +41,20 @@ pub struct AccountQuota { pub used: Option, } -///
+/// Associates a set of tags with an AWS DMS resource.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct AddTagsToResourceMessage { - /// The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be added to. AWS DMS resources include a replication instance, endpoint, and a replication task.
+ /// Identifies the AWS DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN). For AWS DMS, you can tag a replication instance, an endpoint, or a replication task.
#[serde(rename = "ResourceArn")] pub resource_arn: String, - /// The tag to be assigned to the DMS resource.
+ /// One or more tags to be assigned to the resource.
#[serde(rename = "Tags")] pub tags: Vec, } ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsToResourceResponse {} ///
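The reworded `AddTagsToResourceMessage` docs above stress that the ARN may point at a replication instance, an endpoint, or a replication task, and that several tags can be attached in one call. A minimal sketch (the ARN is a placeholder; client and trait names as exposed by `rusoto_dms`):

```rust
use rusoto_core::Region;
use rusoto_dms::{
    AddTagsToResourceMessage, DatabaseMigrationService, DatabaseMigrationServiceClient, Tag,
};

fn main() {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    let message = AddTagsToResourceMessage {
        // Placeholder ARN: any taggable DMS resource works here.
        resource_arn: "arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE".to_owned(),
        tags: vec![Tag {
            key: Some("team".to_owned()),
            value: Some("data".to_owned()),
        }],
    };
    match client.add_tags_to_resource(message).sync() {
        Ok(_) => println!("tags added"),
        Err(err) => eprintln!("tagging failed: {}", err),
    }
}
```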

@@ -64,7 +63,7 @@ pub struct ApplyPendingMaintenanceActionMessage { ///

The pending maintenance action to apply to this resource.

#[serde(rename = "ApplyAction")] pub apply_action: String, - ///
A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate cannot be undone. Valid values:
  • immediate - Apply the maintenance action immediately.
  • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.
  • undo-opt-in - Cancel any existing next-maintenance opt-in requests.
+ /// A value that specifies the type of opt-in request, or undoes an opt-in request. You can't undo an opt-in request of type immediate. Valid values:
  • immediate - Apply the maintenance action immediately.
  • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.
  • undo-opt-in - Cancel any existing next-maintenance opt-in requests.
#[serde(rename = "OptInType")] pub opt_in_type: String, ///

The Amazon Resource Name (ARN) of the AWS DMS resource that the pending maintenance action applies to.

@@ -74,7 +73,7 @@ pub struct ApplyPendingMaintenanceActionMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplyPendingMaintenanceActionResponse { ///

The AWS DMS resource that the pending maintenance action will be applied to.

#[serde(rename = "ResourcePendingMaintenanceActions")] @@ -84,7 +83,7 @@ pub struct ApplyPendingMaintenanceActionResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AvailabilityZone { ///

The name of the availability zone.

#[serde(rename = "Name")] @@ -94,7 +93,7 @@ pub struct AvailabilityZone { ///

The SSL certificate that can be used to encrypt connections between the endpoints and the replication instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Certificate { ///

The Amazon Resource Name (ARN) for the certificate.

#[serde(rename = "CertificateArn")] @@ -104,7 +103,7 @@ pub struct Certificate { #[serde(rename = "CertificateCreationDate")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_creation_date: Option, - ///

The customer-assigned name of the certificate. Valid characters are A-z and 0-9.

+ ///

A customer-assigned name for the certificate. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

#[serde(rename = "CertificateIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_identifier: Option, @@ -112,11 +111,11 @@ pub struct Certificate { #[serde(rename = "CertificateOwner")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_owner: Option, - ///

The contents of the .pem X.509 certificate file for the certificate.

+ ///

The contents of a .pem file, which contains an X.509 certificate.

#[serde(rename = "CertificatePem")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_pem: Option, - ///

The location of the imported Oracle Wallet certificate for use with SSL.

+ ///

The location of an imported Oracle Wallet certificate for use with SSL.

#[serde(rename = "CertificateWallet")] #[serde( deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", @@ -145,7 +144,7 @@ pub struct Certificate { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Connection { ///

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

#[serde(rename = "EndpointArn")] @@ -184,7 +183,7 @@ pub struct CreateEndpointMessage { #[serde(rename = "DatabaseName")] #[serde(skip_serializing_if = "Option::is_none")] pub database_name: Option, - ///
The settings in JSON format for the DMS transfer type of source endpoint. Possible attributes include the following:
  • serviceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket.
  • bucketName - The name of the S3 bucket to use.
  • compressionType - An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE (the default). To keep the files uncompressed, don't use this value.
Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string
JSON syntax for these attributes is as follows: { "ServiceAccessRoleArn": "string", "BucketName": "string", "CompressionType": "none"|"gzip" }
+ /// The settings in JSON format for the DMS transfer type of source endpoint. Possible attributes include the following:
  • serviceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket.
  • bucketName - The name of the S3 bucket to use.
  • compressionType - An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE (the default). To keep the files uncompressed, don't use this value.
Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string
JSON syntax for these attributes is as follows: { "ServiceAccessRoleArn": "string", "BucketName": "string", "CompressionType": "none"|"gzip" }
#[serde(rename = "DmsTransferSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub dms_transfer_settings: Option, @@ -199,25 +198,25 @@ pub struct CreateEndpointMessage { ///

The database endpoint identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

#[serde(rename = "EndpointIdentifier")] pub endpoint_identifier: String, - ///

The type of endpoint.

+ ///

The type of endpoint. Valid values are source and target.

#[serde(rename = "EndpointType")] pub endpoint_type: String, - ///

The type of engine for the endpoint. Valid values, depending on the EndPointType value, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver.

+ ///

The type of engine for the endpoint. Valid values, depending on the EndpointType value, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver.

#[serde(rename = "EngineName")] pub engine_name: String, ///

The external table definition.

#[serde(rename = "ExternalTableDefinition")] #[serde(skip_serializing_if = "Option::is_none")] pub external_table_definition: Option, - ///

Additional attributes associated with the connection.

+ ///

Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with AWS DMS Endpoints in the AWS Database Migration Service User Guide.

#[serde(rename = "ExtraConnectionAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_connection_attributes: Option, - ///

Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. For more information about the available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

+ ///

Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. For more information about the available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

#[serde(rename = "KinesisSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub kinesis_settings: Option, - ///

The AWS KMS key identifier to use to encrypt the connection parameters. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

+ ///

An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, @@ -248,11 +247,11 @@ pub struct CreateEndpointMessage { #[serde(rename = "ServiceAccessRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_access_role_arn: Option, - ///
The Secure Sockets Layer (SSL) mode to use for the SSL connection. The SSL mode can be one of four values: none, require, verify-ca, verify-full. The default value is none.
+ /// The Secure Sockets Layer (SSL) mode to use for the SSL connection. The default is none.
#[serde(rename = "SslMode")] #[serde(skip_serializing_if = "Option::is_none")] pub ssl_mode: Option, - /// Tags to be added to the endpoint.
+ /// One or more tags to be assigned to the endpoint.
#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -264,7 +263,7 @@ pub struct CreateEndpointMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEndpointResponse { ///

The endpoint that was created.

#[serde(rename = "Endpoint")] @@ -279,25 +278,25 @@ pub struct CreateEventSubscriptionMessage { #[serde(rename = "Enabled")] #[serde(skip_serializing_if = "Option::is_none")] pub enabled: Option, - ///
A list of event categories for a source type that you want to subscribe to. You can see a list of the categories for a given source type by calling the DescribeEventCategories action or in the topic Working with Events and Notifications in the AWS Database Migration Service User Guide.
+ /// A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the AWS Database Migration Service User Guide.
#[serde(rename = "EventCategories")] #[serde(skip_serializing_if = "Option::is_none")] pub event_categories: Option>, /// The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
#[serde(rename = "SnsTopicArn")] pub sns_topic_arn: String, - /// The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.
+ /// A list of identifiers for which AWS DMS provides notification events. If you don't specify a value, notifications are provided for all sources. If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.
#[serde(rename = "SourceIds")] #[serde(skip_serializing_if = "Option::is_none")] pub source_ids: Option>, - /// The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance. If this value is not specified, all events are returned. Valid values: replication-instance | migration-task
+ /// The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance. If this value is not specified, all events are returned. Valid values: replication-instance | replication-task
#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, - /// The name of the AWS DMS event notification subscription. Constraints: The name must be less than 255 characters.
+ /// The name of the AWS DMS event notification subscription. This name must be less than 255 characters.
#[serde(rename = "SubscriptionName")] pub subscription_name: String, - /// A tag to be attached to the event subscription.
+ /// One or more tags to be assigned to the event subscription.
#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -305,7 +304,7 @@ pub struct CreateEventSubscriptionMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEventSubscriptionResponse { ///

The event subscription that was created.

#[serde(rename = "EventSubscription")] @@ -320,11 +319,11 @@ pub struct CreateReplicationInstanceMessage { #[serde(rename = "AllocatedStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub allocated_storage: Option, - ///

Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.

Default: true

+ ///

Indicates whether minor engine upgrades will be applied automatically to the replication instance during the maintenance window. This parameter defaults to true.

Default: true

#[serde(rename = "AutoMinorVersionUpgrade")] #[serde(skip_serializing_if = "Option::is_none")] pub auto_minor_version_upgrade: Option, - ///

The EC2 Availability Zone that the replication instance will be created in.

Default: A random, system-chosen Availability Zone in the endpoint's region.

Example: us-east-1d

+ ///

The AWS Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's AWS Region, for example: us-east-1d

#[serde(rename = "AvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub availability_zone: Option, @@ -336,15 +335,15 @@ pub struct CreateReplicationInstanceMessage { #[serde(rename = "EngineVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_version: Option, - ///

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

+ ///

An AWS KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, - ///

Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

+ ///

Specifies whether the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

#[serde(rename = "MultiAZ")] #[serde(skip_serializing_if = "Option::is_none")] pub multi_az: Option, - ///

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

+ ///

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

#[serde(rename = "PreferredMaintenanceWindow")] #[serde(skip_serializing_if = "Option::is_none")] pub preferred_maintenance_window: Option, @@ -362,7 +361,7 @@ pub struct CreateReplicationInstanceMessage { #[serde(rename = "ReplicationSubnetGroupIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_subnet_group_identifier: Option, - ///

Tags to be associated with the replication instance.

+ ///

One or more tags to be assigned to the replication instance.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -374,7 +373,7 @@ pub struct CreateReplicationInstanceMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateReplicationInstanceResponse { ///

The replication instance that was created.

#[serde(rename = "ReplicationInstance")] @@ -391,10 +390,10 @@ pub struct CreateReplicationSubnetGroupMessage { ///

The name for the replication subnet group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be "default".

Example: mySubnetgroup

#[serde(rename = "ReplicationSubnetGroupIdentifier")] pub replication_subnet_group_identifier: String, - ///

The EC2 subnet IDs for the subnet group.

+ ///

One or more subnet IDs to be assigned to the subnet group.

#[serde(rename = "SubnetIds")] pub subnet_ids: Vec, - ///

The tag to be assigned to the subnet group.

+ ///

One or more tags to be assigned to the subnet group.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -402,7 +401,7 @@ pub struct CreateReplicationSubnetGroupMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateReplicationSubnetGroupResponse { ///

The replication subnet group that was created.

#[serde(rename = "ReplicationSubnetGroup")] @@ -425,37 +424,37 @@ pub struct CreateReplicationTaskMessage { #[serde(rename = "CdcStopPosition")] #[serde(skip_serializing_if = "Option::is_none")] pub cdc_stop_position: Option, - ///

The migration type.

+ ///

The migration type. Valid values: full-load | cdc | full-load-and-cdc

#[serde(rename = "MigrationType")] pub migration_type: String, - ///

The Amazon Resource Name (ARN) of the replication instance.

+ ///

The Amazon Resource Name (ARN) of a replication instance.

#[serde(rename = "ReplicationInstanceArn")] pub replication_instance_arn: String, - ///

The replication task identifier.

Constraints:

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

+ ///

An identifier for the replication task.

Constraints:

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

#[serde(rename = "ReplicationTaskIdentifier")] pub replication_task_identifier: String, - ///

Settings for the task, such as target metadata settings. For a complete list of task settings, see Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.

+ ///

Overall settings for the task, in JSON format. For more information, see Task Settings in the AWS Database Migration User Guide.

#[serde(rename = "ReplicationTaskSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_task_settings: Option, - ///

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

+ ///

An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.

#[serde(rename = "SourceEndpointArn")] pub source_endpoint_arn: String, - ///
When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with "file://". When working with the DMS API, provide the JSON as the parameter value. For example, --table-mappings file://mappingfile.json
+ /// The table mappings for the task, in JSON format. For more information, see Table Mapping in the AWS Database Migration User Guide.
#[serde(rename = "TableMappings")] pub table_mappings: String, - /// Tags to be added to the replication instance.
+ /// One or more tags to be assigned to the replication task.
#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
+ /// An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.
#[serde(rename = "TargetEndpointArn")] pub target_endpoint_arn: String, } ///
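The reworded `TableMappings` doc above drops the CLI-centric `file://` phrasing: through the API, and therefore through rusoto, the JSON document itself is passed as the string value. A sketch of a minimal selection rule to assign to `CreateReplicationTaskMessage.table_mappings` (rule names and wildcards are illustrative):

```rust
fn table_mappings() -> String {
    // One selection rule that includes every table in every schema.
    r#"{
        "rules": [{
            "rule-type": "selection",
            "rule-id": "1",
            "rule-name": "include-everything",
            "object-locator": { "schema-name": "%", "table-name": "%" },
            "rule-action": "include"
        }]
    }"#
    .to_owned()
}

fn main() {
    println!("{}", table_mappings());
}
```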

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateReplicationTaskResponse { ///

The replication task that was created.

#[serde(rename = "ReplicationTask")] @@ -471,7 +470,7 @@ pub struct DeleteCertificateMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCertificateResponse { ///

The Secure Sockets Layer (SSL) certificate.

#[serde(rename = "Certificate")] @@ -489,7 +488,7 @@ pub struct DeleteEndpointMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEndpointResponse { ///

The endpoint that was deleted.

#[serde(rename = "Endpoint")] @@ -507,7 +506,7 @@ pub struct DeleteEventSubscriptionMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEventSubscriptionResponse { ///

The event subscription that was deleted.

#[serde(rename = "EventSubscription")] @@ -525,7 +524,7 @@ pub struct DeleteReplicationInstanceMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReplicationInstanceResponse { ///

The replication instance that was deleted.

#[serde(rename = "ReplicationInstance")] @@ -543,7 +542,7 @@ pub struct DeleteReplicationSubnetGroupMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReplicationSubnetGroupResponse {} ///

@@ -556,7 +555,7 @@ pub struct DeleteReplicationTaskMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReplicationTaskResponse { ///

The deleted replication task.

#[serde(rename = "ReplicationTask")] @@ -570,12 +569,16 @@ pub struct DescribeAccountAttributesMessage {} ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAccountAttributesResponse { ///

Account quota information.

#[serde(rename = "AccountQuotas")] #[serde(skip_serializing_if = "Option::is_none")] pub account_quotas: Option>, + ///
A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666. AWS DMS supports UniqueAccountIdentifier in versions 3.1.4 and later.
+ #[serde(rename = "UniqueAccountIdentifier")] + #[serde(skip_serializing_if = "Option::is_none")] + pub unique_account_identifier: Option, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -584,7 +587,7 @@ pub struct DescribeCertificatesMessage { #[serde(rename = "Filters")] #[serde(skip_serializing_if = "Option::is_none")] pub filters: Option>, - ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

+ ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] #[serde(skip_serializing_if = "Option::is_none")] pub marker: Option, @@ -595,7 +598,7 @@ pub struct DescribeCertificatesMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCertificatesResponse { ///

The Secure Sockets Layer (SSL) certificates associated with the replication instance.

#[serde(rename = "Certificates")] @@ -626,7 +629,7 @@ pub struct DescribeConnectionsMessage { ///

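Most `Describe*` messages in this file carry the same `Marker`/`MaxRecords` pagination contract documented above. A sketch of paging through connections by hand, assuming the blocking `.sync()` call style of rusoto clients from this era; the client and message names come from this crate, while the field types are inferred from the hunks:

```rust
use rusoto_core::Region;
use rusoto_dms::{
    DatabaseMigrationService, DatabaseMigrationServiceClient, DescribeConnectionsMessage,
};

fn list_all_connections() -> Result<(), Box<dyn std::error::Error>> {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    let mut marker: Option<String> = None;
    loop {
        // Each page returns at most MaxRecords items plus an optional Marker.
        let page = client
            .describe_connections(DescribeConnectionsMessage {
                marker: marker.take(),
                max_records: Some(50),
                ..Default::default()
            })
            .sync()?;
        for conn in page.connections.unwrap_or_default() {
            println!("{:?}", conn);
        }
        match page.marker {
            Some(next) => marker = Some(next), // more records beyond this page
            None => break,                     // last page reached
        }
    }
    Ok(())
}
```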
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConnectionsResponse { ///

A description of the connections.

#[serde(rename = "Connections")] @@ -657,13 +660,13 @@ pub struct DescribeEndpointTypesMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointTypesResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] #[serde(skip_serializing_if = "Option::is_none")] pub marker: Option, - ///

The type of endpoints that are supported.

+ ///

The types of endpoints that are supported.

#[serde(rename = "SupportedEndpointTypes")] #[serde(skip_serializing_if = "Option::is_none")] pub supported_endpoint_types: Option>, @@ -688,7 +691,7 @@ pub struct DescribeEndpointsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointsResponse { ///

Endpoint description.

#[serde(rename = "Endpoints")] @@ -707,7 +710,7 @@ pub struct DescribeEventCategoriesMessage { #[serde(rename = "Filters")] #[serde(skip_serializing_if = "Option::is_none")] pub filters: Option>, - ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | migration-task

+ ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -715,7 +718,7 @@ pub struct DescribeEventCategoriesMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventCategoriesResponse { ///

A list of event categories.

#[serde(rename = "EventCategoryGroupList")] @@ -746,7 +749,7 @@ pub struct DescribeEventSubscriptionsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventSubscriptionsResponse { ///

A list of event subscriptions.

#[serde(rename = "EventSubscriptionsList")] @@ -769,7 +772,7 @@ pub struct DescribeEventsMessage { #[serde(rename = "EndTime")] #[serde(skip_serializing_if = "Option::is_none")] pub end_time: Option, - ///

A list of event categories for a source type that you want to subscribe to.

+ ///

A list of event categories for the source type that you've chosen.

#[serde(rename = "EventCategories")] #[serde(skip_serializing_if = "Option::is_none")] pub event_categories: Option>, @@ -785,11 +788,11 @@ pub struct DescribeEventsMessage { #[serde(rename = "MaxRecords")] #[serde(skip_serializing_if = "Option::is_none")] pub max_records: Option, - ///

The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It cannot end with a hyphen or contain two consecutive hyphens.

+ ///

The identifier of an event source.

#[serde(rename = "SourceIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub source_identifier: Option, - ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | migration-task

+ ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -801,7 +804,7 @@ pub struct DescribeEventsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventsResponse { ///

The events described.

#[serde(rename = "Events")] @@ -828,7 +831,7 @@ pub struct DescribeOrderableReplicationInstancesMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOrderableReplicationInstancesResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -855,7 +858,7 @@ pub struct DescribePendingMaintenanceActionsMessage { #[serde(rename = "MaxRecords")] #[serde(skip_serializing_if = "Option::is_none")] pub max_records: Option, - ///

The ARN of the replication instance.

+ ///

The Amazon Resource Name (ARN) of the replication instance.

#[serde(rename = "ReplicationInstanceArn")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_instance_arn: Option, @@ -863,7 +866,7 @@ pub struct DescribePendingMaintenanceActionsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePendingMaintenanceActionsResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -885,7 +888,7 @@ pub struct DescribeRefreshSchemasStatusMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRefreshSchemasStatusResponse { ///

The status of the schema.

#[serde(rename = "RefreshSchemasStatus")] @@ -909,7 +912,7 @@ pub struct DescribeReplicationInstanceTaskLogsMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReplicationInstanceTaskLogsResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -944,7 +947,7 @@ pub struct DescribeReplicationInstancesMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReplicationInstancesResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -975,7 +978,7 @@ pub struct DescribeReplicationSubnetGroupsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReplicationSubnetGroupsResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -1006,7 +1009,7 @@ pub struct DescribeReplicationTaskAssessmentResultsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReplicationTaskAssessmentResultsResponse { ///

- The Amazon S3 bucket where the task assessment report is located.

#[serde(rename = "BucketName")] @@ -1037,7 +1040,7 @@ pub struct DescribeReplicationTasksMessage { #[serde(rename = "MaxRecords")] #[serde(skip_serializing_if = "Option::is_none")] pub max_records: Option, - ///

Set this flag to avoid returning setting information. Use this to reduce overhead when settings are too large. Choose TRUE to use this flag, otherwise choose FALSE (default).

+ ///

An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default).

#[serde(rename = "WithoutSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub without_settings: Option, @@ -1045,7 +1048,7 @@ pub struct DescribeReplicationTasksMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReplicationTasksResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -1075,7 +1078,7 @@ pub struct DescribeSchemasMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSchemasResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -1109,7 +1112,7 @@ pub struct DescribeTableStatisticsMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTableStatisticsResponse { ///

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

#[serde(rename = "Marker")] @@ -1149,7 +1152,7 @@ pub struct DynamoDbSettings { ///

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ElasticsearchSettings { - ///

The endpoint for the ElasticSearch cluster.

+ ///

The endpoint for the Elasticsearch cluster.

#[serde(rename = "EndpointUri")] pub endpoint_uri: String, ///

The maximum number of seconds that DMS retries failed API requests to the Elasticsearch cluster.

@@ -1167,7 +1170,7 @@ pub struct ElasticsearchSettings { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Endpoint { ///

The Amazon Resource Name (ARN) used for SSL connection to the endpoint.

#[serde(rename = "CertificateArn")] @@ -1197,7 +1200,7 @@ pub struct Endpoint { #[serde(rename = "EndpointIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_identifier: Option, - ///

The type of endpoint.

+ ///

The type of endpoint. Valid values are source and target.

#[serde(rename = "EndpointType")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_type: Option, @@ -1205,7 +1208,7 @@ pub struct Endpoint { #[serde(rename = "EngineDisplayName")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_display_name: Option, - ///

The database engine name. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver.

+ ///

The database engine name. Valid values, depending on the EndpointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver.

#[serde(rename = "EngineName")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_name: Option, @@ -1225,7 +1228,7 @@ pub struct Endpoint { #[serde(rename = "KinesisSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub kinesis_settings: Option, - ///

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

+ ///

An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, @@ -1237,7 +1240,7 @@ pub struct Endpoint { #[serde(rename = "Port")] #[serde(skip_serializing_if = "Option::is_none")] pub port: Option, - ///

Settings for the Amazon Redshift endpoint

+ ///

Settings for the Amazon Redshift endpoint.

#[serde(rename = "RedshiftSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub redshift_settings: Option, @@ -1253,7 +1256,7 @@ pub struct Endpoint { #[serde(rename = "ServiceAccessRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_access_role_arn: Option, - ///

The SSL mode used to connect to the endpoint.

SSL mode can be one of four values: none, require, verify-ca, verify-full.

The default value is none.

+ ///

The SSL mode used to connect to the endpoint. The default value is none.

#[serde(rename = "SslMode")] #[serde(skip_serializing_if = "Option::is_none")] pub ssl_mode: Option, @@ -1269,7 +1272,7 @@ pub struct Endpoint { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Event { ///

The date of the event.

#[serde(rename = "Date")] @@ -1283,11 +1286,11 @@ pub struct Event { #[serde(rename = "Message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option, - ///

The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

Constraints:replication instance, endpoint, migration task

+ ///

The identifier of an event source.

#[serde(rename = "SourceIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub source_identifier: Option, - ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | endpoint | migration-task

+ ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | endpoint | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -1295,13 +1298,13 @@ pub struct Event { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventCategoryGroup { - ///

A list of event categories for a SourceType that you want to subscribe to.

+ ///

A list of event categories from a source type that you've chosen.

#[serde(rename = "EventCategories")] #[serde(skip_serializing_if = "Option::is_none")] pub event_categories: Option>, - ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | migration-task

+ ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -1309,7 +1312,7 @@ pub struct EventCategoryGroup { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventSubscription { ///

The AWS DMS event notification subscription Id.

#[serde(rename = "CustSubscriptionId")] @@ -1335,7 +1338,7 @@ pub struct EventSubscription { #[serde(rename = "SourceIdsList")] #[serde(skip_serializing_if = "Option::is_none")] pub source_ids_list: Option>, - ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | migration-task

+ ///

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -1362,14 +1365,14 @@ pub struct Filter { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ImportCertificateMessage { - ///

The customer-assigned name of the certificate. Valid characters are A-z and 0-9.

+ ///

A customer-assigned name for the certificate. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

#[serde(rename = "CertificateIdentifier")] pub certificate_identifier: String, - ///

The contents of the .pem X.509 certificate file for the certificate.

+ ///

The contents of a .pem file, which contains an X.509 certificate.

#[serde(rename = "CertificatePem")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_pem: Option, - ///

The location of the imported Oracle Wallet certificate for use with SSL.

+ ///

The location of an imported Oracle Wallet certificate for use with SSL.

#[serde(rename = "CertificateWallet")] #[serde( deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", @@ -1385,7 +1388,7 @@ pub struct ImportCertificateMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportCertificateResponse { ///

The certificate to be uploaded.

#[serde(rename = "Certificate")] @@ -1420,7 +1423,7 @@ pub struct ListTagsForResourceMessage { ///

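Tying the `ImportCertificateMessage` fields above together: a sketch of importing a PEM certificate, under the same blocking-client assumptions as the earlier pagination example. The `certificate_arn` accessor on the returned `Certificate` is an assumption about a struct not shown in this hunk:

```rust
use rusoto_core::Region;
use rusoto_dms::{
    DatabaseMigrationService, DatabaseMigrationServiceClient, ImportCertificateMessage,
};

fn import_pem(pem: String) -> Result<(), Box<dyn std::error::Error>> {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    let resp = client
        .import_certificate(ImportCertificateMessage {
            // Identifier rules per the doc text: begin with a letter; ASCII
            // letters, digits, and hyphens only; no trailing or doubled hyphens.
            certificate_identifier: "my-replication-cert".to_string(),
            certificate_pem: Some(pem),
            ..Default::default()
        })
        .sync()?;
    // Certificate and its ARN field are assumed from the surrounding API shape.
    println!("imported: {:?}", resp.certificate.and_then(|c| c.certificate_arn));
    Ok(())
}
```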
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

A list of tags for the resource.

#[serde(rename = "TagList")] @@ -1458,11 +1461,11 @@ pub struct ModifyEndpointMessage { #[serde(rename = "EndpointIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_identifier: Option, - ///

The type of endpoint.

+ ///

The type of endpoint. Valid values are source and target.

#[serde(rename = "EndpointType")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_type: Option, - ///

The type of engine for the endpoint. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver.

+ ///

The type of engine for the endpoint. Valid values, depending on the EndpointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver.

#[serde(rename = "EngineName")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_name: Option, @@ -1474,7 +1477,7 @@ pub struct ModifyEndpointMessage { #[serde(rename = "ExtraConnectionAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_connection_attributes: Option, - ///

Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. For more information about the available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

+ ///

Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. For more information about the available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

#[serde(rename = "KinesisSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub kinesis_settings: Option, @@ -1505,7 +1508,7 @@ pub struct ModifyEndpointMessage { #[serde(rename = "ServiceAccessRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_access_role_arn: Option, - ///

The SSL mode to be used.

SSL mode can be one of four values: none, require, verify-ca, verify-full.

The default value is none.

+ ///

The SSL mode used to connect to the endpoint. The default value is none.

#[serde(rename = "SslMode")] #[serde(skip_serializing_if = "Option::is_none")] pub ssl_mode: Option, @@ -1517,7 +1520,7 @@ pub struct ModifyEndpointMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyEndpointResponse { ///

The modified endpoint.

#[serde(rename = "Endpoint")] @@ -1540,7 +1543,7 @@ pub struct ModifyEventSubscriptionMessage { #[serde(rename = "SnsTopicArn")] #[serde(skip_serializing_if = "Option::is_none")] pub sns_topic_arn: Option, - ///

The type of AWS DMS resource that generates the events you want to subscribe to.

Valid values: replication-instance | migration-task

+ ///

The type of AWS DMS resource that generates the events you want to subscribe to.

Valid values: replication-instance | replication-task

#[serde(rename = "SourceType")] #[serde(skip_serializing_if = "Option::is_none")] pub source_type: Option, @@ -1551,7 +1554,7 @@ pub struct ModifyEventSubscriptionMessage { ///

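The `SourceType` corrections above (migration-task becoming replication-task) matter when filtering subscriptions. A sketch of narrowing a subscription to task events, under the same client assumptions as the earlier examples; the "failure" category is an assumed example value:

```rust
use rusoto_core::Region;
use rusoto_dms::{
    DatabaseMigrationService, DatabaseMigrationServiceClient, ModifyEventSubscriptionMessage,
};

fn watch_task_failures(subscription_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    client
        .modify_event_subscription(ModifyEventSubscriptionMessage {
            subscription_name: subscription_name.to_string(),
            // Per the corrected doc text: replication-instance | replication-task
            source_type: Some("replication-task".to_string()),
            event_categories: Some(vec!["failure".to_string()]),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```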
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyEventSubscriptionResponse { ///

The modified event subscription.

#[serde(rename = "EventSubscription")] @@ -1566,7 +1569,7 @@ pub struct ModifyReplicationInstanceMessage { #[serde(rename = "AllocatedStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub allocated_storage: Option, - ///

Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version.

+ ///

Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage, and the change is asynchronously applied as soon as possible.

This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version.

#[serde(rename = "AllowMajorVersionUpgrade")] #[serde(skip_serializing_if = "Option::is_none")] pub allow_major_version_upgrade: Option, @@ -1582,7 +1585,7 @@ pub struct ModifyReplicationInstanceMessage { #[serde(rename = "EngineVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_version: Option, - ///

Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

+ ///

Specifies whether the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

#[serde(rename = "MultiAZ")] #[serde(skip_serializing_if = "Option::is_none")] pub multi_az: Option, @@ -1609,7 +1612,7 @@ pub struct ModifyReplicationInstanceMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyReplicationInstanceResponse { ///

The modified replication instance.

#[serde(rename = "ReplicationInstance")] @@ -1620,7 +1623,7 @@ pub struct ModifyReplicationInstanceResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ModifyReplicationSubnetGroupMessage { - ///

The description of the replication instance subnet group.

+ ///

A description for the replication instance subnet group.

#[serde(rename = "ReplicationSubnetGroupDescription")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_subnet_group_description: Option, @@ -1634,7 +1637,7 @@ pub struct ModifyReplicationSubnetGroupMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyReplicationSubnetGroupResponse { ///

The modified replication subnet group.

#[serde(rename = "ReplicationSubnetGroup")] @@ -1657,7 +1660,7 @@ pub struct ModifyReplicationTaskMessage { #[serde(rename = "CdcStopPosition")] #[serde(skip_serializing_if = "Option::is_none")] pub cdc_stop_position: Option, - ///

The migration type.

Valid values: full-load | cdc | full-load-and-cdc

+ ///

The migration type. Valid values: full-load | cdc | full-load-and-cdc

#[serde(rename = "MigrationType")] #[serde(skip_serializing_if = "Option::is_none")] pub migration_type: Option, @@ -1672,7 +1675,7 @@ pub struct ModifyReplicationTaskMessage { #[serde(rename = "ReplicationTaskSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_task_settings: Option, - ///

When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with "file://". When working with the DMS API, provide the JSON as the parameter value.

For example, --table-mappings file://mappingfile.json

+ ///

When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. When working with the DMS API, provide the JSON as the parameter value, for example: --table-mappings file://mappingfile.json

#[serde(rename = "TableMappings")] #[serde(skip_serializing_if = "Option::is_none")] pub table_mappings: Option, @@ -1680,7 +1683,7 @@ pub struct ModifyReplicationTaskMessage { ///

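As the corrected `TableMappings` doc above notes, the file:// prefix is a CLI convention; through the API you pass the JSON itself. A sketch, same client assumptions as the earlier examples:

```rust
use rusoto_core::Region;
use rusoto_dms::{
    DatabaseMigrationService, DatabaseMigrationServiceClient, ModifyReplicationTaskMessage,
};

fn set_table_mappings(
    task_arn: &str,
    mappings_json: String,
) -> Result<(), Box<dyn std::error::Error>> {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    client
        .modify_replication_task(ModifyReplicationTaskMessage {
            replication_task_arn: task_arn.to_string(),
            // Inline JSON: no file:// prefix here, unlike the CLI.
            table_mappings: Some(mappings_json),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```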
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyReplicationTaskResponse { ///

The replication task that was modified.

#[serde(rename = "ReplicationTask")] @@ -1743,9 +1746,9 @@ pub struct MongoDbSettings { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OrderableReplicationInstance { - ///

List of availability zones for this replication instance.

+ ///

List of Availability Zones for this replication instance.

#[serde(rename = "AvailabilityZones")] #[serde(skip_serializing_if = "Option::is_none")] pub availability_zones: Option>, @@ -1769,6 +1772,10 @@ pub struct OrderableReplicationInstance { #[serde(rename = "MinAllocatedStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub min_allocated_storage: Option, + ///

The value returned when the specified EngineVersion of the replication instance is in Beta or test mode. This indicates some features might not work as expected.

AWS DMS supports ReleaseStatus in versions 3.1.4 and later.

+ #[serde(rename = "ReleaseStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub release_status: Option, ///

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

#[serde(rename = "ReplicationInstanceClass")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1781,7 +1788,7 @@ pub struct OrderableReplicationInstance { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PendingMaintenanceAction { ///

The type of pending maintenance action that is available for the resource.

#[serde(rename = "Action")] @@ -1821,7 +1828,7 @@ pub struct RebootReplicationInstanceMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootReplicationInstanceResponse { ///

The replication instance that is being rebooted.

#[serde(rename = "ReplicationInstance")] @@ -1832,15 +1839,15 @@ pub struct RebootReplicationInstanceResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RedshiftSettings { - ///

Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be loaded without generating an error. You can choose TRUE or FALSE (default).

This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If the date format for the data does not match the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that field.

+ ///

A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be loaded without generating an error. You can choose true or false (the default).

This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that field.

#[serde(rename = "AcceptAnyDate")] #[serde(skip_serializing_if = "Option::is_none")] pub accept_any_date: Option, - ///

Code to run after connecting. This should be the code, not a filename.

+ ///

Code to run after connecting. This parameter should contain the code itself, not the name of a file containing the code.

#[serde(rename = "AfterConnectScript")] #[serde(skip_serializing_if = "Option::is_none")] pub after_connect_script: Option, - ///

The location where the CSV files are stored before being uploaded to the S3 bucket.

+ ///

The location where the comma-separated value (.csv) files are stored before being uploaded to the S3 bucket.

#[serde(rename = "BucketFolder")] #[serde(skip_serializing_if = "Option::is_none")] pub bucket_folder: Option, @@ -1848,39 +1855,39 @@ pub struct RedshiftSettings { #[serde(rename = "BucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub bucket_name: Option, - ///

Sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

+ ///

A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

#[serde(rename = "ConnectionTimeout")] #[serde(skip_serializing_if = "Option::is_none")] pub connection_timeout: Option, - ///

The name of the Amazon Redshift data warehouse (service) you are working with.

+ ///

The name of the Amazon Redshift data warehouse (service) that you are working with.

#[serde(rename = "DatabaseName")] #[serde(skip_serializing_if = "Option::is_none")] pub database_name: Option, - ///

The date format you are using. Valid values are auto (case-sensitive), your date format string enclosed in quotes, or NULL. If this is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'. Using auto recognizes most strings, even some that are not supported when you use a date format string.

If your date and time values use formats different from each other, set this to auto.

+ ///

The date format that you are using. Valid values are auto (case-sensitive), your date format string enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'. Using auto recognizes most strings, even some that aren't supported when you use a date format string.

If your date and time values use formats different from each other, set this to auto.

#[serde(rename = "DateFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub date_format: Option, - ///

Specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of TRUE sets empty CHAR and VARCHAR fields to null. The default is FALSE.

+ ///

A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null. The default is false.

#[serde(rename = "EmptyAsNull")] #[serde(skip_serializing_if = "Option::is_none")] pub empty_as_null: Option, - ///

The type of server side encryption you want to use for your data. This is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (default) or SSE_KMS. To use SSE_S3, create an IAM role with a policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", "s3:ListBucket".

+ ///

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS. To use SSE_S3, create an AWS Identity and Access Management (IAM) role with a policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", "s3:ListBucket"

#[serde(rename = "EncryptionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub encryption_mode: Option, - ///

Specifies the number of threads used to upload a single file. This accepts a value between 1 and 64. It defaults to 10.

+ ///

The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.

#[serde(rename = "FileTransferUploadStreams")] #[serde(skip_serializing_if = "Option::is_none")] pub file_transfer_upload_streams: Option, - ///

Sets the amount of time to wait (in milliseconds) before timing out, beginning from when you begin loading.

+ ///

The amount of time to wait (in milliseconds) before timing out, beginning from when you begin loading.

#[serde(rename = "LoadTimeout")] #[serde(skip_serializing_if = "Option::is_none")] pub load_timeout: Option, - ///

Specifies the maximum size (in KB) of any CSV file used to transfer data to Amazon Redshift. This accepts a value between 1 and 1048576. It defaults to 32768 KB (32 MB).

+ ///

The maximum size (in KB) of any .csv file used to transfer data to Amazon Redshift. This accepts a value from 1 through 1,048,576. It defaults to 32,768 KB (32 MB).

#[serde(rename = "MaxFileSize")] #[serde(skip_serializing_if = "Option::is_none")] pub max_file_size: Option, - ///

The password for the user named in the username property.

+ ///

The password for the user named in the username property.

#[serde(rename = "Password")] #[serde(skip_serializing_if = "Option::is_none")] pub password: Option, @@ -1888,15 +1895,15 @@ pub struct RedshiftSettings { #[serde(rename = "Port")] #[serde(skip_serializing_if = "Option::is_none")] pub port: Option, - ///

Removes surrounding quotation marks from strings in the incoming data. All characters within the quotation marks, including delimiters, are retained. Choose TRUE to remove quotation marks. The default is FALSE.

+ ///

A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters within the quotation marks, including delimiters, are retained. Choose true to remove quotation marks. The default is false.

#[serde(rename = "RemoveQuotes")] #[serde(skip_serializing_if = "Option::is_none")] pub remove_quotes: Option, - ///

Replaces invalid characters specified in ReplaceInvalidChars, substituting the specified value instead. The default is "?".

+ ///

A value that specifies to replace the invalid characters specified in ReplaceInvalidChars, substituting the specified characters instead. The default is "?".

#[serde(rename = "ReplaceChars")] #[serde(skip_serializing_if = "Option::is_none")] pub replace_chars: Option, - ///

A list of chars you want to replace. Use with ReplaceChars.

+ ///

A list of characters that you want to replace. Use with ReplaceChars.

#[serde(rename = "ReplaceInvalidChars")] #[serde(skip_serializing_if = "Option::is_none")] pub replace_invalid_chars: Option, @@ -1904,23 +1911,23 @@ pub struct RedshiftSettings { #[serde(rename = "ServerName")] #[serde(skip_serializing_if = "Option::is_none")] pub server_name: Option, - ///

If you are using SSE_KMS for the EncryptionMode, provide the KMS Key ID. The key you use needs an attached policy that enables IAM user permissions and allows use of the key.

+ ///

The AWS KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide this key ID. The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.

#[serde(rename = "ServerSideEncryptionKmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub server_side_encryption_kms_key_id: Option, - ///

The ARN of the role that has access to the Redshift service.

+ ///

The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service.

#[serde(rename = "ServiceAccessRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_access_role_arn: Option, - ///

The time format you want to use. Valid values are auto (case-sensitive), 'timeformat_string', 'epochsecs', or 'epochmillisecs'. It defaults to 10. Using auto recognizes most strings, even some that are not supported when you use a time format string.

If your date and time values use formats different from each other, set this to auto.

+ ///

The time format that you want to use. Valid values are auto (case-sensitive), 'timeformat_string', 'epochsecs', or 'epochmillisecs'. It defaults to 10. Using auto recognizes most strings, even some that aren't supported when you use a time format string.

If your date and time values use formats different from each other, set this parameter to auto.

#[serde(rename = "TimeFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub time_format: Option, - ///

Removes the trailing white space characters from a VARCHAR string. This parameter applies only to columns with a VARCHAR data type. Choose TRUE to remove unneeded white space. The default is FALSE.

+ ///

A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter applies only to columns with a VARCHAR data type. Choose true to remove unneeded white space. The default is false.

#[serde(rename = "TrimBlanks")] #[serde(skip_serializing_if = "Option::is_none")] pub trim_blanks: Option, - ///

Truncates data in columns to the appropriate number of characters, so that it fits in the column. Applies only to columns with a VARCHAR or CHAR data type, and rows with a size of 4 MB or less. Choose TRUE to truncate data. The default is FALSE.

+ ///

A value that specifies to truncate data in columns to the appropriate number of characters, so that the data fits in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a size of 4 MB or less. Choose true to truncate data. The default is false.

#[serde(rename = "TruncateColumns")] #[serde(skip_serializing_if = "Option::is_none")] pub truncate_columns: Option, @@ -1928,7 +1935,7 @@ pub struct RedshiftSettings { #[serde(rename = "Username")] #[serde(skip_serializing_if = "Option::is_none")] pub username: Option, - ///

The size of the write buffer to use in rows. Valid values range from 1 to 2048. Defaults to 1024. Use this setting to tune performance.

+ ///

The size of the write buffer to use in rows. Valid values range from 1 through 2,048. The default is 1,024. Use this setting to tune performance.

#[serde(rename = "WriteBufferSize")] #[serde(skip_serializing_if = "Option::is_none")] pub write_buffer_size: Option, @@ -1947,7 +1954,7 @@ pub struct RefreshSchemasMessage { ///

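Collecting a few of the `RedshiftSettings` knobs documented above into one value; a sketch using only field names from this hunk, with types inferred since the generics were lost in this rendering:

```rust
use rusoto_dms::RedshiftSettings;

fn redshift_settings() -> RedshiftSettings {
    RedshiftSettings {
        accept_any_date: Some(true),            // tolerate odd DATE/TIMESTAMP input
        date_format: Some("auto".to_string()),  // let Redshift infer date formats
        empty_as_null: Some(true),              // empty CHAR/VARCHAR -> NULL
        file_transfer_upload_streams: Some(10), // the documented default
        max_file_size: Some(32_768),            // KB; the documented default (32 MB)
        ..Default::default()
    }
}
```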
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RefreshSchemasResponse { ///

The status of the refreshed schema.

#[serde(rename = "RefreshSchemasStatus")] @@ -1957,7 +1964,7 @@ pub struct RefreshSchemasResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RefreshSchemasStatus { ///

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

#[serde(rename = "EndpointArn")] @@ -1996,7 +2003,7 @@ pub struct ReloadTablesMessage { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReloadTablesResponse { ///

The Amazon Resource Name (ARN) of the replication task.

#[serde(rename = "ReplicationTaskArn")] @@ -2004,10 +2011,10 @@ pub struct ReloadTablesResponse { pub replication_task_arn: Option, } -///

+///

Removes one or more tags from an AWS DMS resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RemoveTagsFromResourceMessage { - ///

>The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be removed from.

+ ///

An AWS DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).

#[serde(rename = "ResourceArn")] pub resource_arn: String, ///

The tag key (name) of the tag to be removed.

@@ -2017,12 +2024,12 @@ pub struct RemoveTagsFromResourceMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsFromResourceResponse {} ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationInstance { ///

The amount of storage (in gigabytes) that is allocated for the replication instance.

#[serde(rename = "AllocatedStorage")] @@ -2052,11 +2059,11 @@ pub struct ReplicationInstance { #[serde(rename = "InstanceCreateTime")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_create_time: Option, - ///

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

+ ///

An AWS KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, - ///

Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

+ ///

Specifies whether the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

#[serde(rename = "MultiAZ")] #[serde(skip_serializing_if = "Option::is_none")] pub multi_az: Option, @@ -2084,11 +2091,11 @@ pub struct ReplicationInstance { #[serde(rename = "ReplicationInstanceIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_instance_identifier: Option, - ///

The private IP address of the replication instance.

+ ///

One or more private IP addresses for the replication instance.

#[serde(rename = "ReplicationInstancePrivateIpAddresses")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_instance_private_ip_addresses: Option>, - ///

The public IP address of the replication instance.

+ ///

One or more public IP addresses for the replication instance.

#[serde(rename = "ReplicationInstancePublicIpAddresses")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_instance_public_ip_addresses: Option>, @@ -2112,7 +2119,7 @@ pub struct ReplicationInstance { ///

Contains metadata for a replication instance task log.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationInstanceTaskLog { ///

The size, in bytes, of the replication task log.

#[serde(rename = "ReplicationInstanceTaskLogSize")] @@ -2130,7 +2137,7 @@ pub struct ReplicationInstanceTaskLog { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationPendingModifiedValues { ///

The amount of storage (in gigabytes) that is allocated for the replication instance.

#[serde(rename = "AllocatedStorage")] @@ -2140,7 +2147,7 @@ pub struct ReplicationPendingModifiedValues { #[serde(rename = "EngineVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_version: Option, - ///

Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

+ ///

Specifies whether the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

#[serde(rename = "MultiAZ")] #[serde(skip_serializing_if = "Option::is_none")] pub multi_az: Option, @@ -2152,9 +2159,9 @@ pub struct ReplicationPendingModifiedValues { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationSubnetGroup { - ///

The description of the replication subnet group.

+ ///

A description for the replication subnet group.

#[serde(rename = "ReplicationSubnetGroupDescription")] #[serde(skip_serializing_if = "Option::is_none")] pub replication_subnet_group_description: Option, @@ -2178,9 +2185,9 @@ pub struct ReplicationSubnetGroup { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationTask { - ///

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

+ ///

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want the CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

#[serde(rename = "CdcStartPosition")] #[serde(skip_serializing_if = "Option::is_none")] pub cdc_start_position: Option, @@ -2252,7 +2259,7 @@ pub struct ReplicationTask { ///

The task assessment report in JSON format.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationTaskAssessmentResult { ///

The task assessment results in JSON format.

#[serde(rename = "AssessmentResults")] @@ -2286,7 +2293,7 @@ pub struct ReplicationTaskAssessmentResult { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationTaskStats { ///

The elapsed time of the task, in milliseconds.

#[serde(rename = "ElapsedTimeMillis")] @@ -2316,13 +2323,13 @@ pub struct ReplicationTaskStats { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourcePendingMaintenanceActions { ///

Detailed information about the pending maintenance action.

#[serde(rename = "PendingMaintenanceActionDetails")] #[serde(skip_serializing_if = "Option::is_none")] pub pending_maintenance_action_details: Option>, - ///

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) in the DMS documentation.

+ ///

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for AWS DMS in the DMS documentation.

#[serde(rename = "ResourceIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_identifier: Option, @@ -2331,7 +2338,7 @@ pub struct ResourcePendingMaintenanceActions { ///

Settings for exporting data to Amazon S3.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct S3Settings { - ///

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path <bucketFolder>/<schema_name>/<table_name>/. If this parameter is not specified, then the path used is <schema_name>/<table_name>/.

+ ///

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter is not specified, then the path used is schema_name/table_name/.

#[serde(rename = "BucketFolder")] #[serde(skip_serializing_if = "Option::is_none")] pub bucket_folder: Option, @@ -2339,11 +2346,11 @@ pub struct S3Settings { #[serde(rename = "BucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub bucket_name: Option, - ///

Option to write only INSERT operations to the comma-separated value (CSV) output files. By default, the first field in a CSV record contains the letter I (insert), U (update) or D (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If cdcInsertsOnly is set to true, then only INSERTs are recorded in the CSV file, without the I annotation on each line. Valid values are TRUE and FALSE.

+ ///

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If cdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between CdcInsertsOnly and IncludeOpForFullLoad in versions 3.1.4 and later.

#[serde(rename = "CdcInsertsOnly")] #[serde(skip_serializing_if = "Option::is_none")] pub cdc_inserts_only: Option, - ///

An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Set to NONE (the default) or do not use to leave the files uncompressed. Applies to both CSV and PARQUET data formats.

+ ///

An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Set to NONE (the default) or do not use to leave the files uncompressed. Applies to both .csv and .parquet file formats.

#[serde(rename = "CompressionType")] #[serde(skip_serializing_if = "Option::is_none")] pub compression_type: Option, @@ -2355,27 +2362,27 @@ pub struct S3Settings { #[serde(rename = "CsvRowDelimiter")] #[serde(skip_serializing_if = "Option::is_none")] pub csv_row_delimiter: Option, - ///

The format of the data which you want to use for output. You can choose one of the following:

  • CSV : This is a row-based format with comma-separated values.

  • PARQUET : Apache Parquet is a columnar storage format that features efficient compression and provides faster query response.

+ ///

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).

  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

#[serde(rename = "DataFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub data_format: Option, - ///

The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1MiB). For PARQUET format only.

+ ///

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for .parquet file format only.

#[serde(rename = "DataPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub data_page_size: Option, - ///

The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is stored using an encoding type of PLAIN. Defaults to 1024 * 1024 bytes (1MiB), the maximum size of a dictionary page before it reverts to PLAIN encoding. For PARQUET format only.

+ ///

The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.

#[serde(rename = "DictPageSizeLimit")] #[serde(skip_serializing_if = "Option::is_none")] pub dict_page_size_limit: Option, - ///

Enables statistics for Parquet pages and rowGroups. Choose TRUE to enable statistics, choose FALSE to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. Defaults to TRUE. For PARQUET format only.

+ ///

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for .parquet file format only.

#[serde(rename = "EnableStatistics")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_statistics: Option, - ///

The type of encoding you are using: RLEDICTIONARY (default), PLAIN, or PLAINDICTIONARY.

  • RLEDICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently.

  • PLAIN does not use encoding at all. Values are stored as they are.

  • PLAINDICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

+ ///

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.

  • PLAIN doesn't use encoding at all. Values are stored as they are.

  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.
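Taken together, the DataFormat and Parquet tuning settings above map onto optional fields of the generated S3Settings struct. A minimal sketch follows; the elided field types are assumed to be Option<String>, Option<i64>, and Option<bool>, the lowercase enum string is an assumption from the AWS API reference, and the concrete values are illustrative, not recommendations:

```rust
use rusoto_dms::S3Settings;

/// A hypothetical Parquet tuning profile for an S3 target endpoint.
fn parquet_tuning() -> S3Settings {
    S3Settings {
        data_format: Some("parquet".to_string()), // .parquet output instead of the default .csv
        data_page_size: Some(2 * 1024 * 1024),    // 2 MiB data pages (default is 1 MiB)
        dict_page_size_limit: Some(1024 * 1024),  // revert a column to PLAIN past 1 MiB
        enable_statistics: Some(true),            // NULL/DISTINCT/MAX/MIN statistics
        encoding_type: Some("rle-dictionary".to_string()), // the default encoding
        ..Default::default()
    }
}
```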

#[serde(rename = "EncodingType")] #[serde(skip_serializing_if = "Option::is_none")] pub encoding_type: Option, - ///

The type of server side encryption you want to use for your data. This is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSES3 (default) or SSEKMS. To use SSE_S3, you need an IAM role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket

  • s3:ListBucket

  • s3:DeleteBucket

  • s3:GetBucketLocation

  • s3:GetObject

  • s3:PutObject

  • s3:DeleteObject

  • s3:GetObjectVersion

  • s3:GetBucketPolicy

  • s3:PutBucketPolicy

  • s3:DeleteBucketPolicy

+ ///

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket

  • s3:ListBucket

  • s3:DeleteBucket

  • s3:GetBucketLocation

  • s3:GetObject

  • s3:PutObject

  • s3:DeleteObject

  • s3:GetObjectVersion

  • s3:GetBucketPolicy

  • s3:PutBucketPolicy

  • s3:DeleteBucketPolicy

#[serde(rename = "EncryptionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub encryption_mode: Option, @@ -2383,15 +2390,19 @@ pub struct S3Settings { #[serde(rename = "ExternalTableDefinition")] #[serde(skip_serializing_if = "Option::is_none")] pub external_table_definition: Option, - ///

The version of Apache Parquet format you want to use: PARQUET_1_0 (default) or PARQUET_2_0.

+ ///

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports IncludeOpForFullLoad in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with CdcInsertsOnly for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
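As a sketch of that interaction, here is how the two flags might be set together so that full-load and CDC .csv records share the same annotated layout; the Option<bool> field types are assumptions, since the diff elides them:

```rust
use rusoto_dms::S3Settings;

/// Annotate full-load rows with "I" and migrate only INSERTs during CDC,
/// so full-load and CDC .csv records have a consistent first field.
fn annotated_csv_settings() -> S3Settings {
    S3Settings {
        include_op_for_full_load: Some(true),
        cdc_inserts_only: Some(true),
        ..Default::default()
    }
}
```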

+ #[serde(rename = "IncludeOpForFullLoad")] + #[serde(skip_serializing_if = "Option::is_none")] + pub include_op_for_full_load: Option, + ///

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

#[serde(rename = "ParquetVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub parquet_version: Option, - ///

The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row groups grows, the slower writes become. Defaults to 10,000 (ten thousand) rows. For PARQUET format only.

If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes (64 * 1024 * 1024).

+ ///

The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes (64 * 1024 * 1024).

#[serde(rename = "RowGroupLength")] #[serde(skip_serializing_if = "Option::is_none")] pub row_group_length: Option, - ///

If you are using SSE_KMS for the EncryptionMode, provide the KMS Key ID. The key you use needs an attached policy that enables IAM user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier <value> --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=<value>,BucketFolder=<value>,BucketName=<value>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<value>

+ ///

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier <value> --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=<value>,BucketFolder=<value>,BucketName=<value>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<value>
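A rough Rust equivalent of that CLI call, as a sketch only: the identifier, bucket names, role ARN, and key ID below are placeholders, and `.sync()` blocks on the returned RusotoFuture:

```rust
use rusoto_core::Region;
use rusoto_dms::{
    CreateEndpointMessage, DatabaseMigrationService, DatabaseMigrationServiceClient, S3Settings,
};

fn main() {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    let request = CreateEndpointMessage {
        endpoint_identifier: "my-s3-target".to_string(), // placeholder
        endpoint_type: "target".to_string(),
        engine_name: "s3".to_string(),
        s3_settings: Some(S3Settings {
            service_access_role_arn: Some("arn:aws:iam::123456789012:role/dms-s3".to_string()),
            bucket_folder: Some("migrated".to_string()),
            bucket_name: Some("my-dms-bucket".to_string()),
            encryption_mode: Some("SSE_KMS".to_string()),
            server_side_encryption_kms_key_id: Some("my-kms-key-id".to_string()), // placeholder
            ..Default::default()
        }),
        ..Default::default()
    };
    match client.create_endpoint(request).sync() {
        Ok(response) => println!("created endpoint: {:?}", response.endpoint),
        Err(error) => eprintln!("create_endpoint failed: {}", error),
    }
}
```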

#[serde(rename = "ServerSideEncryptionKmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub server_side_encryption_kms_key_id: Option, @@ -2399,6 +2410,10 @@ pub struct S3Settings { #[serde(rename = "ServiceAccessRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_access_role_arn: Option, + ///

A value that specifies the name of a timestamp column to include in the Amazon S3 target endpoint data. AWS DMS includes this additional column in the migrated data when you set timestampColumnName to a nonblank value.

AWS DMS supports TimestampColumnName in versions 3.1.4 and later.

For a full load, each row of the timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS. For a CDC load, each row of the timestamp column contains the timestamp for the commit of that row in the source database. The format for the timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. For CDC, the microsecond precision depends on the commit timestamp supported by DMS for the source database. When the AddColumnName setting is set to true, DMS also includes the name for the timestamp column that you set as the nonblank value of timestampColumnName.
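A minimal sketch of opting into this column, assuming the elided field type is Option<String>; the column name is an arbitrary example:

```rust
use rusoto_dms::S3Settings;

/// Ask AWS DMS (3.1.4+) to append a transfer/commit timestamp column
/// named "dms_ts" to every record written to the S3 target.
fn timestamped_settings() -> S3Settings {
    S3Settings {
        timestamp_column_name: Some("dms_ts".to_string()),
        ..Default::default()
    }
}
```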

+ #[serde(rename = "TimestampColumnName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timestamp_column_name: Option, } ///

@@ -2411,7 +2426,7 @@ pub struct StartReplicationTaskAssessmentMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartReplicationTaskAssessmentResponse { ///

The assessed replication task.

#[serde(rename = "ReplicationTask")] @@ -2444,7 +2459,7 @@ pub struct StartReplicationTaskMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartReplicationTaskResponse { ///

The replication task started.

#[serde(rename = "ReplicationTask")] @@ -2462,7 +2477,7 @@ pub struct StopReplicationTaskMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopReplicationTaskResponse { ///

The replication task stopped.

#[serde(rename = "ReplicationTask")] @@ -2472,7 +2487,7 @@ pub struct StopReplicationTaskResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Subnet { ///

The Availability Zone of the subnet.

#[serde(rename = "SubnetAvailabilityZone")] @@ -2490,9 +2505,9 @@ pub struct Subnet { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SupportedEndpointType { - ///

The type of endpoint.

+ ///

The type of endpoint. Valid values are source and target.

#[serde(rename = "EndpointType")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_type: Option, @@ -2500,7 +2515,7 @@ pub struct SupportedEndpointType { #[serde(rename = "EngineDisplayName")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_display_name: Option, - ///

The database engine name. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver.

+ ///

The database engine name. Valid values, depending on the EndpointType, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver.

#[serde(rename = "EngineName")] #[serde(skip_serializing_if = "Option::is_none")] pub engine_name: Option, @@ -2512,7 +2527,7 @@ pub struct SupportedEndpointType { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TableStatistics { ///

The Data Definition Language (DDL) used to build and modify the structure of your tables.

#[serde(rename = "Ddls")] @@ -2619,7 +2634,7 @@ pub struct TestConnectionMessage { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestConnectionResponse { ///

The connection tested.

#[serde(rename = "Connection")] @@ -2629,7 +2644,7 @@ pub struct TestConnectionResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VpcSecurityGroupMembership { ///

The status of the VPC security group.

#[serde(rename = "Status")] @@ -2720,7 +2735,7 @@ pub enum CreateEndpointError { AccessDeniedFault(String), ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource you are attempting to create already exists.

ResourceAlreadyExistsFault(String), @@ -2789,15 +2804,15 @@ impl Error for CreateEndpointError { /// Errors returned by CreateEventSubscription #[derive(Debug, PartialEq)] pub enum CreateEventSubscriptionError { - ///

The ciphertext references a key that doesn't exist or DMS account doesn't have an access to

+ ///

The ciphertext references a key that doesn't exist or that the DMS account doesn't have access to.

KMSAccessDeniedFault(String), ///

The specified master key (CMK) isn't enabled.

KMSDisabledFault(String), - ///

The state of the specified KMS resource isn't valid for this request.

+ ///

The state of the specified AWS KMS resource isn't valid for this request.

KMSInvalidStateFault(String), - ///

The specified KMS entity or resource can't be found.

+ ///

The specified AWS KMS entity or resource can't be found.

KMSNotFoundFault(String), - ///

This request triggered KMS request throttling.

+ ///

This request triggered AWS KMS request throttling.

KMSThrottlingFault(String), ///

The resource you are attempting to create already exists.

ResourceAlreadyExistsFault(String), @@ -2904,7 +2919,7 @@ pub enum CreateReplicationInstanceError { InvalidResourceStateFault(String), ///

The subnet provided is invalid.

InvalidSubnet(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The replication subnet group does not cover enough Availability Zones (AZs). Edit the replication subnet group and add more AZs.

ReplicationSubnetGroupDoesNotCoverEnoughAZs(String), @@ -3090,7 +3105,7 @@ pub enum CreateReplicationTaskError { AccessDeniedFault(String), ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource you are attempting to create already exists.

ResourceAlreadyExistsFault(String), @@ -4145,7 +4160,7 @@ pub enum ModifyEndpointError { AccessDeniedFault(String), ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource you are attempting to create already exists.

ResourceAlreadyExistsFault(String), @@ -4206,15 +4221,15 @@ impl Error for ModifyEndpointError { /// Errors returned by ModifyEventSubscription #[derive(Debug, PartialEq)] pub enum ModifyEventSubscriptionError { - ///

The ciphertext references a key that doesn't exist or DMS account doesn't have an access to

+ ///

The ciphertext references a key that doesn't exist or that the DMS account doesn't have access to.

KMSAccessDeniedFault(String), ///

The specified master key (CMK) isn't enabled.

KMSDisabledFault(String), - ///

The state of the specified KMS resource isn't valid for this request.

+ ///

The state of the specified AWS KMS resource isn't valid for this request.

KMSInvalidStateFault(String), - ///

The specified KMS entity or resource can't be found.

+ ///

The specified AWS KMS entity or resource can't be found.

KMSNotFoundFault(String), - ///

This request triggered KMS request throttling.

+ ///

This request triggered AWS KMS request throttling.

KMSThrottlingFault(String), ///

The resource could not be found.

ResourceNotFoundFault(String), @@ -4469,7 +4484,7 @@ impl Error for ModifyReplicationSubnetGroupError { pub enum ModifyReplicationTaskError { ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource you are attempting to create already exists.

ResourceAlreadyExistsFault(String), @@ -4571,7 +4586,7 @@ impl Error for RebootReplicationInstanceError { pub enum RefreshSchemasError { ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource could not be found.

ResourceNotFoundFault(String), @@ -4845,7 +4860,7 @@ impl Error for StopReplicationTaskError { pub enum TestConnectionError { ///

The resource is in a state that prevents it from being used for database migration.

InvalidResourceStateFault(String), - ///

AWS DMS cannot access the KMS key.

+ ///

AWS DMS cannot access the AWS KMS key.

KMSKeyNotAccessibleFault(String), ///

The resource could not be found.

ResourceNotFoundFault(String), @@ -4979,7 +4994,7 @@ pub trait DatabaseMigrationService { input: DeleteReplicationTaskMessage, ) -> RusotoFuture; - ///

Lists all of the AWS DMS attributes for a customer account. The attributes include AWS DMS quotas for the account, such as the number of replication instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

This command does not take any parameters.

+ ///

Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.

This command does not take any parameters.
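A usage sketch of the parameterless call, assuming the response exposes the quota list as an account_quotas field:

```rust
use rusoto_core::Region;
use rusoto_dms::{DatabaseMigrationService, DatabaseMigrationServiceClient};

fn main() {
    let client = DatabaseMigrationServiceClient::new(Region::UsEast1);
    // The operation takes no input; `.sync()` blocks on the RusotoFuture.
    match client.describe_account_attributes().sync() {
        Ok(response) => println!("quotas: {:?}", response.account_quotas),
        Err(error) => eprintln!("describe_account_attributes failed: {}", error),
    }
}
```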

fn describe_account_attributes( &self, ) -> RusotoFuture; @@ -5200,10 +5215,7 @@ impl DatabaseMigrationServiceClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DatabaseMigrationServiceClient { - DatabaseMigrationServiceClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5217,10 +5229,17 @@ impl DatabaseMigrationServiceClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DatabaseMigrationServiceClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client( + client: Client, + region: region::Region, + ) -> DatabaseMigrationServiceClient { + DatabaseMigrationServiceClient { client, region } } } @@ -5595,7 +5614,7 @@ impl DatabaseMigrationService for DatabaseMigrationServiceClient { }) } - ///

Lists all of the AWS DMS attributes for a customer account. The attributes include AWS DMS quotas for the account, such as the number of replication instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

This command does not take any parameters.

+ ///

Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.

This command does not take any parameters.

fn describe_account_attributes( &self, ) -> RusotoFuture { diff --git a/rusoto/services/docdb/Cargo.toml b/rusoto/services/docdb/Cargo.toml index f255b8daf02..df038dd7bcc 100644 --- a/rusoto/services/docdb/Cargo.toml +++ b/rusoto/services/docdb/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_docdb" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/docdb/README.md b/rusoto/services/docdb/README.md index 3b991e46049..3b9e4c948d4 100644 --- a/rusoto/services/docdb/README.md +++ b/rusoto/services/docdb/README.md @@ -23,9 +23,16 @@ To use `rusoto_docdb` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_docdb = "0.40.0" +rusoto_docdb = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/docdb/src/custom/mod.rs b/rusoto/services/docdb/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/docdb/src/custom/mod.rs +++ b/rusoto/services/docdb/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/docdb/src/generated.rs b/rusoto/services/docdb/src/generated.rs index 18459ef81f4..e5e42f904f4 100644 --- a/rusoto/services/docdb/src/generated.rs +++ b/rusoto/services/docdb/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -451,6 +450,8 @@ pub struct CreateDBClusterMessage { pub db_cluster_parameter_group_name: Option, ///

A DB subnet group to associate with this DB cluster.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

pub db_subnet_group_name: Option, + ///

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

+ pub deletion_protection: Option, ///

A list of log types that need to be enabled for exporting to Amazon CloudWatch Logs.

pub enable_cloudwatch_logs_exports: Option>, ///

The name of the database engine to be used for this DB cluster.

Valid values: docdb

@@ -459,10 +460,10 @@ pub struct CreateDBClusterMessage { pub engine_version: Option, ///

The AWS KMS key identifier for an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a DB cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new DB cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon DocumentDB uses the encryption key that is used to encrypt the source. Otherwise, Amazon DocumentDB uses your default encryption key.

  • If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, Amazon DocumentDB uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.

pub kms_key_id: Option, - ///

The password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".

Constraints: Must contain from 8 to 41 characters.

- pub master_user_password: Option, + ///

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@).

Constraints: Must contain from 8 to 41 characters.

+ pub master_user_password: String, ///

The name of the master user for the DB cluster.

Constraints:

  • Must be from 1 to 16 letters or numbers.

  • The first character must be a letter.

  • Cannot be a reserved word for the chosen database engine.

- pub master_username: Option, + pub master_username: String, ///
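Note that this is a breaking change: master_username and master_user_password become required String fields rather than Option<String>, and DeletionProtection arrives as a new optional flag. A sketch of the resulting call shape, with placeholder values:

```rust
use rusoto_docdb::CreateDBClusterMessage;

/// Build a create-cluster request under the new field types; the
/// identifier and credentials here are placeholders.
fn create_cluster_request() -> CreateDBClusterMessage {
    CreateDBClusterMessage {
        db_cluster_identifier: "sample-cluster".to_string(),
        engine: "docdb".to_string(),
        master_username: "masteruser".to_string(), // now required
        master_user_password: "correct-horse-battery".to_string(), // now required, 8-41 chars
        deletion_protection: Some(true), // cluster can't be deleted until this is disabled
        ..Default::default()
    }
}
```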

The port number on which the instances in the DB cluster accept connections.

pub port: Option, ///

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

@@ -512,6 +513,9 @@ impl CreateDBClusterMessageSerializer { if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } + if let Some(ref field_value) = obj.deletion_protection { + params.put(&format!("{}{}", prefix, "DeletionProtection"), &field_value); + } if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { LogTypeListSerializer::serialize( params, @@ -526,12 +530,14 @@ impl CreateDBClusterMessageSerializer { if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } - if let Some(ref field_value) = obj.master_user_password { - params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); - } - if let Some(ref field_value) = obj.master_username { - params.put(&format!("{}{}", prefix, "MasterUsername"), &field_value); - } + params.put( + &format!("{}{}", prefix, "MasterUserPassword"), + &obj.master_user_password, + ); + params.put( + &format!("{}{}", prefix, "MasterUsername"), + &obj.master_username, + ); if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value); } @@ -727,7 +733,7 @@ pub struct CreateDBInstanceMessage { pub availability_zone: Option, ///

The identifier of the DB cluster that the instance will belong to.

pub db_cluster_identifier: String, - ///

The compute and memory capacity of the DB instance; for example, db.m4.large.

+ ///

The compute and memory capacity of the DB instance; for example, db.r5.large.

pub db_instance_class: String, ///

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

pub db_instance_identifier: String, @@ -905,6 +911,8 @@ pub struct DBCluster { pub db_subnet_group: Option, ///

The AWS Region-unique, immutable identifier for the DB cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed.

pub db_cluster_resource_id: Option, + ///

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

+ pub deletion_protection: Option, ///

The earliest time to which a database can be restored with point-in-time restore.

pub earliest_restorable_time: Option, ///

A list of log types that this DB cluster is configured to export to Amazon CloudWatch Logs.

@@ -1003,6 +1011,12 @@ impl DBClusterDeserializer { stack, )?); } + "DeletionProtection" => { + obj.deletion_protection = Some(BooleanDeserializer::deserialize( + "DeletionProtection", + stack, + )?); + } "EarliestRestorableTime" => { obj.earliest_restorable_time = Some(TStampDeserializer::deserialize( "EarliestRestorableTime", @@ -3682,9 +3696,11 @@ pub struct ModifyDBClusterMessage { pub db_cluster_identifier: String, ///

The name of the DB cluster parameter group to use for the DB cluster.

pub db_cluster_parameter_group_name: Option, + ///

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

+ pub deletion_protection: Option, ///

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

pub engine_version: Option, - ///

The new password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".

Constraints: Must contain from 8 to 41 characters.

+ ///

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@).

Constraints: Must contain from 8 to 41 characters.

pub master_user_password: Option, ///

The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster2

pub new_db_cluster_identifier: Option, @@ -3733,6 +3749,9 @@ impl ModifyDBClusterMessageSerializer { &field_value, ); } + if let Some(ref field_value) = obj.deletion_protection { + params.put(&format!("{}{}", prefix, "DeletionProtection"), &field_value); + } if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } @@ -3908,7 +3927,7 @@ pub struct ModifyDBInstanceMessage { pub apply_immediately: Option, ///

Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage results if this parameter is set to true during the maintenance window, and a newer minor version is available, and Amazon DocumentDB has enabled automatic patching for that engine version.

pub auto_minor_version_upgrade: Option, - ///

The new compute and memory capacity of the DB instance; for example, db.m4.large. Not all DB instance classes are available in all AWS Regions.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting.

+ ///

The new compute and memory capacity of the DB instance; for example, db.r5.large. Not all DB instance classes are available in all AWS Regions.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting.

pub db_instance_class: Option, ///

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

pub db_instance_identifier: String, @@ -4784,6 +4803,8 @@ pub struct RestoreDBClusterFromSnapshotMessage { pub db_cluster_identifier: String, ///

The name of the DB subnet group to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

pub db_subnet_group_name: Option, + ///

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

+ pub deletion_protection: Option, ///

A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.

pub enable_cloudwatch_logs_exports: Option>, ///

The database engine to use for the new DB cluster.

Default: The same as source.

Constraint: Must be compatible with the engine of the source.

@@ -4825,6 +4846,9 @@ impl RestoreDBClusterFromSnapshotMessageSerializer { if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } + if let Some(ref field_value) = obj.deletion_protection { + params.put(&format!("{}{}", prefix, "DeletionProtection"), &field_value); + } if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { LogTypeListSerializer::serialize( params, @@ -4894,6 +4918,8 @@ pub struct RestoreDBClusterToPointInTimeMessage { pub db_cluster_identifier: String, ///

The DB subnet group name to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

pub db_subnet_group_name: Option, + ///

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

+ pub deletion_protection: Option, ///

A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.

pub enable_cloudwatch_logs_exports: Option>, ///

The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new DB cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

You can restore to a new DB cluster and encrypt the new DB cluster with an AWS KMS key that is different from the AWS KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the AWS KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

  • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the AWS KMS key that was used to encrypt the source DB cluster.

  • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.

If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

@@ -4928,6 +4954,9 @@ impl RestoreDBClusterToPointInTimeMessageSerializer { if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } + if let Some(ref field_value) = obj.deletion_protection { + params.put(&format!("{}{}", prefix, "DeletionProtection"), &field_value); + } if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { LogTypeListSerializer::serialize( params, @@ -5006,6 +5035,96 @@ impl SourceTypeDeserializer { Ok(obj) } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct StartDBClusterMessage { + ///

The identifier of the cluster to restart. Example: docdb-2019-05-28-15-24-52

+ pub db_cluster_identifier: String, +} + +/// Serialize `StartDBClusterMessage` contents to a `SignedRequest`. +struct StartDBClusterMessageSerializer; +impl StartDBClusterMessageSerializer { + fn serialize(params: &mut Params, name: &str, obj: &StartDBClusterMessage) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put( + &format!("{}{}", prefix, "DBClusterIdentifier"), + &obj.db_cluster_identifier, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct StartDBClusterResult { + pub db_cluster: Option, +} + +struct StartDBClusterResultDeserializer; +impl StartDBClusterResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, StartDBClusterResult, _>(tag_name, stack, |name, stack, obj| { + match name { + "DBCluster" => { + obj.db_cluster = Some(DBClusterDeserializer::deserialize("DBCluster", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct StopDBClusterMessage { + ///

The identifier of the cluster to stop. Example: docdb-2019-05-28-15-24-52

+ pub db_cluster_identifier: String, +} + +/// Serialize `StopDBClusterMessage` contents to a `SignedRequest`. +struct StopDBClusterMessageSerializer; +impl StopDBClusterMessageSerializer { + fn serialize(params: &mut Params, name: &str, obj: &StopDBClusterMessage) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put( + &format!("{}{}", prefix, "DBClusterIdentifier"), + &obj.db_cluster_identifier, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct StopDBClusterResult { + pub db_cluster: Option, +} + +struct StopDBClusterResultDeserializer; +impl StopDBClusterResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, StopDBClusterResult, _>(tag_name, stack, |name, stack, obj| { + match name { + "DBCluster" => { + obj.db_cluster = Some(DBClusterDeserializer::deserialize("DBCluster", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} struct StringDeserializer; impl StringDeserializer { #[allow(unused_variables)] @@ -5401,6 +5520,10 @@ impl Error for AddTagsToResourceError { /// Errors returned by ApplyPendingMaintenanceAction #[derive(Debug, PartialEq)] pub enum ApplyPendingMaintenanceActionError { + ///

The DB cluster isn't in a valid state.

+ InvalidDBClusterStateFault(String), + ///

The specified DB instance isn't in the available state.

+ InvalidDBInstanceStateFault(String), ///

The specified resource ID was not found.

ResourceNotFoundFault(String), } @@ -5415,6 +5538,20 @@ impl ApplyPendingMaintenanceActionError { find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { + "InvalidDBClusterStateFault" => { + return RusotoError::Service( + ApplyPendingMaintenanceActionError::InvalidDBClusterStateFault( + parsed_error.message, + ), + ) + } + "InvalidDBInstanceState" => { + return RusotoError::Service( + ApplyPendingMaintenanceActionError::InvalidDBInstanceStateFault( + parsed_error.message, + ), + ) + } "ResourceNotFoundFault" => { return RusotoError::Service( ApplyPendingMaintenanceActionError::ResourceNotFoundFault( @@ -5445,6 +5582,8 @@ impl fmt::Display for ApplyPendingMaintenanceActionError { impl Error for ApplyPendingMaintenanceActionError { fn description(&self) -> &str { match *self { + ApplyPendingMaintenanceActionError::InvalidDBClusterStateFault(ref cause) => cause, + ApplyPendingMaintenanceActionError::InvalidDBInstanceStateFault(ref cause) => cause, ApplyPendingMaintenanceActionError::ResourceNotFoundFault(ref cause) => cause, } } @@ -8362,6 +8501,132 @@ impl Error for RestoreDBClusterToPointInTimeError { } } } +/// Errors returned by StartDBCluster +#[derive(Debug, PartialEq)] +pub enum StartDBClusterError { + ///

DBClusterIdentifier doesn't refer to an existing DB cluster.

+ DBClusterNotFoundFault(String), + ///

The DB cluster isn't in a valid state.

+ InvalidDBClusterStateFault(String), + ///

The specified DB instance isn't in the available state.

+ InvalidDBInstanceStateFault(String), +} + +impl StartDBClusterError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "DBClusterNotFoundFault" => { + return RusotoError::Service(StartDBClusterError::DBClusterNotFoundFault( + parsed_error.message, + )) + } + "InvalidDBClusterStateFault" => { + return RusotoError::Service( + StartDBClusterError::InvalidDBClusterStateFault(parsed_error.message), + ) + } + "InvalidDBInstanceState" => { + return RusotoError::Service( + StartDBClusterError::InvalidDBInstanceStateFault(parsed_error.message), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for StartDBClusterError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartDBClusterError { + fn description(&self) -> &str { + match *self { + StartDBClusterError::DBClusterNotFoundFault(ref cause) => cause, + StartDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, + StartDBClusterError::InvalidDBInstanceStateFault(ref cause) => cause, + } + } +} +/// Errors returned by StopDBCluster +#[derive(Debug, PartialEq)] +pub enum StopDBClusterError { + ///

DBClusterIdentifier doesn't refer to an existing DB cluster.

+ DBClusterNotFoundFault(String), + ///

The DB cluster isn't in a valid state.

+ InvalidDBClusterStateFault(String), + ///

The specified DB instance isn't in the available state.

+ InvalidDBInstanceStateFault(String), +} + +impl StopDBClusterError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "DBClusterNotFoundFault" => { + return RusotoError::Service(StopDBClusterError::DBClusterNotFoundFault( + parsed_error.message, + )) + } + "InvalidDBClusterStateFault" => { + return RusotoError::Service( + StopDBClusterError::InvalidDBClusterStateFault(parsed_error.message), + ) + } + "InvalidDBInstanceState" => { + return RusotoError::Service( + StopDBClusterError::InvalidDBInstanceStateFault(parsed_error.message), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for StopDBClusterError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StopDBClusterError { + fn description(&self) -> &str { + match *self { + StopDBClusterError::DBClusterNotFoundFault(ref cause) => cause, + StopDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, + StopDBClusterError::InvalidDBInstanceStateFault(ref cause) => cause, + } + } +} /// Trait representing the capabilities of the Amazon DocDB API. Amazon DocDB clients implement this trait. pub trait Docdb { ///

Adds metadata tags to an Amazon DocumentDB resource. You can use these tags with cost allocation reporting to track costs that are associated with Amazon DocumentDB resources, or in a Condition statement in an AWS Identity and Access Management (IAM) policy for Amazon DocumentDB.

@@ -8603,6 +8868,18 @@ pub trait Docdb { &self, input: RestoreDBClusterToPointInTimeMessage, ) -> RusotoFuture; + + ///

Restarts the stopped cluster that is specified by DBClusterIdentifier. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.

+ fn start_db_cluster( + &self, + input: StartDBClusterMessage, + ) -> RusotoFuture; + + ///

Stops the running cluster that is specified by DBClusterIdentifier. The cluster must be in the available state. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.

+ fn stop_db_cluster( + &self, + input: StopDBClusterMessage, + ) -> RusotoFuture; } /// A client for the Amazon DocDB API. #[derive(Clone)] @@ -8616,10 +8893,7 @@ impl DocdbClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DocdbClient { - DocdbClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -8633,10 +8907,14 @@ impl DocdbClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DocdbClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DocdbClient { + DocdbClient { client, region } } } @@ -8698,7 +8976,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8746,7 +9024,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8794,7 +9072,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8845,7 +9123,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8893,7 +9171,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8941,7 +9219,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8992,7 +9270,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9042,7 +9320,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9093,7 +9371,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( 
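A usage sketch for the two new operations, reusing the cluster identifier from the docs above; `.sync()` blocks on each RusotoFuture:

```rust
use rusoto_core::Region;
use rusoto_docdb::{Docdb, DocdbClient, StartDBClusterMessage, StopDBClusterMessage};

fn main() {
    let client = DocdbClient::new(Region::UsEast1);

    // Stop a running cluster (it must be in the `available` state).
    let stopped = client
        .stop_db_cluster(StopDBClusterMessage {
            db_cluster_identifier: "docdb-2019-05-28-15-24-52".to_string(),
        })
        .sync();
    println!("stop result: {:?}", stopped);

    // Later, restart the stopped cluster by the same identifier.
    let started = client
        .start_db_cluster(StartDBClusterMessage {
            db_cluster_identifier: "docdb-2019-05-28-15-24-52".to_string(),
        })
        .sync();
    println!("start result: {:?}", started);
}
```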
response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9166,7 +9444,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9217,7 +9495,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9294,7 +9572,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9342,7 +9620,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9395,7 +9673,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9443,7 +9721,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9494,7 +9772,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9542,7 +9820,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9592,7 +9870,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9640,7 +9918,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9693,7 +9971,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( 
response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9741,7 +10019,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9792,7 +10070,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9841,7 +10119,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9892,7 +10170,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9943,7 +10221,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9993,7 +10271,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10044,7 +10322,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10092,7 +10370,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10143,7 +10421,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10194,7 +10472,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10244,7 +10522,7 @@ impl Docdb for DocdbClient { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10295,7 +10573,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10368,7 +10646,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10416,7 +10694,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10464,7 +10742,7 @@ impl Docdb for DocdbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10482,4 +10760,106 @@ impl Docdb for DocdbClient { })) }) } + + ///
Restarts the stopped cluster that is specified by DBClusterIdentifier. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.
+ fn start_db_cluster(
+     &self,
+     input: StartDBClusterMessage,
+ ) -> RusotoFuture<StartDBClusterResult, StartDBClusterError> {
+     let mut request = SignedRequest::new("POST", "rds", &self.region, "/");
+     let mut params = Params::new();
+
+     params.put("Action", "StartDBCluster");
+     params.put("Version", "2014-10-31");
+     StartDBClusterMessageSerializer::serialize(&mut params, "", &input);
+     request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+     request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+     self.client.sign_and_dispatch(request, |response| {
+         if !response.status.is_success() {
+             return Box::new(
+                 response
+                     .buffer()
+                     .from_err()
+                     .and_then(|response| Err(StartDBClusterError::from_response(response))),
+             );
+         }
+
+         Box::new(response.buffer().from_err().and_then(move |response| {
+             let result;
+
+             if response.body.is_empty() {
+                 result = StartDBClusterResult::default();
+             } else {
+                 let reader = EventReader::new_with_config(
+                     response.body.as_ref(),
+                     ParserConfig::new().trim_whitespace(false),
+                 );
+                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                 let _start_document = stack.next();
+                 let actual_tag_name = peek_at_name(&mut stack)?;
+                 start_element(&actual_tag_name, &mut stack)?;
+                 result = StartDBClusterResultDeserializer::deserialize(
+                     "StartDBClusterResult",
+                     &mut stack,
+                 )?;
+                 skip_tree(&mut stack);
+                 end_element(&actual_tag_name, &mut stack)?;
+             }
+             // parse non-payload
+             Ok(result)
+         }))
+     })
+ }
+
+ ///
Stops the running cluster that is specified by DBClusterIdentifier. The cluster must be in the available state. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.
+ fn stop_db_cluster(
+     &self,
+     input: StopDBClusterMessage,
+ ) -> RusotoFuture<StopDBClusterResult, StopDBClusterError> {
+     let mut request = SignedRequest::new("POST", "rds", &self.region, "/");
+     let mut params = Params::new();
+
+     params.put("Action", "StopDBCluster");
+     params.put("Version", "2014-10-31");
+     StopDBClusterMessageSerializer::serialize(&mut params, "", &input);
+     request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+     request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+     self.client.sign_and_dispatch(request, |response| {
+         if !response.status.is_success() {
+             return Box::new(
+                 response
+                     .buffer()
+                     .from_err()
+                     .and_then(|response| Err(StopDBClusterError::from_response(response))),
+             );
+         }
+
+         Box::new(response.buffer().from_err().and_then(move |response| {
+             let result;
+
+             if response.body.is_empty() {
+                 result = StopDBClusterResult::default();
+             } else {
+                 let reader = EventReader::new_with_config(
+                     response.body.as_ref(),
+                     ParserConfig::new().trim_whitespace(false),
+                 );
+                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                 let _start_document = stack.next();
+                 let actual_tag_name = peek_at_name(&mut stack)?;
+                 start_element(&actual_tag_name, &mut stack)?;
+                 result = StopDBClusterResultDeserializer::deserialize(
+                     "StopDBClusterResult",
+                     &mut stack,
+                 )?;
+                 skip_tree(&mut stack);
+                 end_element(&actual_tag_name, &mut stack)?;
+             }
+             // parse non-payload
+             Ok(result)
+         }))
+     })
+ }
}
diff --git a/rusoto/services/ds/Cargo.toml b/rusoto/services/ds/Cargo.toml
index e43a5c2c5df..a61afe672dc 100644
--- a/rusoto/services/ds/Cargo.toml
+++ b/rusoto/services/ds/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
name = "rusoto_ds"
readme = "README.md"
repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
homepage = "https://www.rusoto.org/"
edition = "2018"
exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
serde_json = "1.0.1"
[dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
path = "../../core"
default-features = false
[dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
path = "../../../mock"
+default-features = false
[features]
default = ["native-tls"]
native-tls = ["rusoto_core/native-tls"]
rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/ds/README.md b/rusoto/services/ds/README.md
index 8f3ef153206..3b6038c31ce 100644
--- a/rusoto/services/ds/README.md
+++ b/rusoto/services/ds/README.md
@@ -23,9 +23,16 @@ To use `rusoto_ds` in your application, add it as a dependency in your `Cargo.to
```toml
[dependencies]
-rusoto_ds = "0.40.0"
+rusoto_ds = "0.41.0"
```
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
## Contributing
See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/ds/src/custom/mod.rs b/rusoto/services/ds/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/ds/src/custom/mod.rs
+++ b/rusoto/services/ds/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/ds/src/generated.rs b/rusoto/services/ds/src/generated.rs
index e484ec4b5a6..455f8a667b9 100644
--- a/rusoto/services/ds/src/generated.rs
+++ b/rusoto/services/ds/src/generated.rs
@@ -9,17 +9,16 @@ // must be updated to generate the changes.
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct AcceptSharedDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcceptSharedDirectoryResult { ///

The shared directory in the directory consumer account.

#[serde(rename = "SharedDirectory")] @@ -55,7 +54,7 @@ pub struct AddIpRoutesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddIpRoutesResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -69,7 +68,7 @@ pub struct AddTagsToResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsToResourceResult {} ///

Represents a named directory attribute.

@@ -96,12 +95,12 @@ pub struct CancelSchemaExtensionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelSchemaExtensionResult {} ///

Contains information about a computer account in a directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Computer { ///

An array of Attribute objects containing the LDAP attributes that belong to the computer account.

#[serde(rename = "ComputerAttributes")] @@ -119,7 +118,7 @@ pub struct Computer { ///

Points to a remote domain with which you are setting up a trust relationship. Conditional forwarders are required in order to set up a trust relationship with another domain.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConditionalForwarder { ///

The IP addresses of the remote DNS server associated with RemoteDomainName. This is the IP address of the DNS server that your conditional forwarder points to.

#[serde(rename = "DnsIpAddrs")] @@ -166,7 +165,7 @@ pub struct ConnectDirectoryRequest { ///

Contains the results of the ConnectDirectory operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConnectDirectoryResult { ///

The identifier of the new directory.

#[serde(rename = "DirectoryId")] @@ -187,7 +186,7 @@ pub struct CreateAliasRequest { ///

Contains the results of the CreateAlias operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAliasResult { ///

The alias for the directory.

#[serde(rename = "Alias")] @@ -223,7 +222,7 @@ pub struct CreateComputerRequest { ///

Contains the results for the CreateComputer operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateComputerResult { ///

A Computer object that represents the computer account.

#[serde(rename = "Computer")] @@ -247,7 +246,7 @@ pub struct CreateConditionalForwarderRequest { ///

The result of a CreateConditionalForwarder request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConditionalForwarderResult {} ///

Contains the inputs for the CreateDirectory operation.

@@ -282,7 +281,7 @@ pub struct CreateDirectoryRequest { ///

Contains the results of the CreateDirectory operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDirectoryResult { ///

The identifier of the directory that was created.

#[serde(rename = "DirectoryId")] @@ -301,7 +300,7 @@ pub struct CreateLogSubscriptionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLogSubscriptionResult {} ///

Creates an AWS Managed Microsoft AD directory.

@@ -336,7 +335,7 @@ pub struct CreateMicrosoftADRequest { ///

Result of a CreateMicrosoftAD request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateMicrosoftADResult { ///

The identifier of the directory that was created.

#[serde(rename = "DirectoryId")] @@ -358,7 +357,7 @@ pub struct CreateSnapshotRequest { ///

Contains the results of the CreateSnapshot operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSnapshotResult { ///

The identifier of the snapshot that was created.

#[serde(rename = "SnapshotId")] @@ -397,7 +396,7 @@ pub struct CreateTrustRequest { ///

The result of a CreateTrust request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTrustResult { ///

A unique identifier for the trust relationship that was created.

#[serde(rename = "TrustId")] @@ -418,7 +417,7 @@ pub struct DeleteConditionalForwarderRequest { ///

The result of a DeleteConditionalForwarder request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteConditionalForwarderResult {} ///

Contains the inputs for the DeleteDirectory operation.

@@ -431,7 +430,7 @@ pub struct DeleteDirectoryRequest { ///

Contains the results of the DeleteDirectory operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDirectoryResult { ///

The directory identifier.

#[serde(rename = "DirectoryId")] @@ -447,7 +446,7 @@ pub struct DeleteLogSubscriptionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLogSubscriptionResult {} ///

Contains the inputs for the DeleteSnapshot operation.

@@ -460,7 +459,7 @@ pub struct DeleteSnapshotRequest { ///

Contains the results of the DeleteSnapshot operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSnapshotResult { ///

The identifier of the directory snapshot that was deleted.

#[serde(rename = "SnapshotId")] @@ -482,7 +481,7 @@ pub struct DeleteTrustRequest { ///

The result of a DeleteTrust request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTrustResult { ///

The Trust ID of the trust relationship that was deleted.

#[serde(rename = "TrustId")] @@ -503,7 +502,7 @@ pub struct DeregisterEventTopicRequest { ///

The result of a DeregisterEventTopic request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterEventTopicResult {} ///

Describes a conditional forwarder.

@@ -520,7 +519,7 @@ pub struct DescribeConditionalForwardersRequest { ///

The result of a DescribeConditionalForwarder request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConditionalForwardersResult { ///

The list of conditional forwarders that have been created.

#[serde(rename = "ConditionalForwarders")] @@ -547,7 +546,7 @@ pub struct DescribeDirectoriesRequest { ///

Contains the results of the DescribeDirectories operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDirectoriesResult { ///

The list of DirectoryDescription objects that were retrieved.

It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

#[serde(rename = "DirectoryDescriptions")] @@ -579,7 +578,7 @@ pub struct DescribeDomainControllersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDomainControllersResult { ///

List of the DomainController objects that were retrieved.

#[serde(rename = "DomainControllers")] @@ -606,7 +605,7 @@ pub struct DescribeEventTopicsRequest { ///

The result of a DescribeEventTopic request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventTopicsResult { ///

A list of SNS topic names that receive status messages from the specified Directory ID.

#[serde(rename = "EventTopics")] @@ -634,7 +633,7 @@ pub struct DescribeSharedDirectoriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSharedDirectoriesResult { ///

If not null, token that indicates that more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeSharedDirectories to retrieve the next set of items.

#[serde(rename = "NextToken")] @@ -669,7 +668,7 @@ pub struct DescribeSnapshotsRequest { ///

Contains the results of the DescribeSnapshots operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSnapshotsResult { ///

If not null, more results are available. Pass this value in the NextToken member of a subsequent call to DescribeSnapshots.

#[serde(rename = "NextToken")] @@ -704,7 +703,7 @@ pub struct DescribeTrustsRequest { ///

The result of a DescribeTrust request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTrustsResult { ///

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeTrusts to retrieve the next set of items.

#[serde(rename = "NextToken")] @@ -735,7 +734,7 @@ pub struct DirectoryConnectSettings { ///

Contains information about an AD Connector directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectoryConnectSettingsDescription { ///

A list of the Availability Zones that the directory is in.

#[serde(rename = "AvailabilityZones")] @@ -765,7 +764,7 @@ pub struct DirectoryConnectSettingsDescription { ///

Contains information about an AWS Directory Service directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectoryDescription { ///

The access URL for the directory, such as http://<alias>.awsapps.com. If no alias has been created for the directory, <alias> is the directory identifier, such as d-XXXXXXXXXX.

#[serde(rename = "AccessUrl")] @@ -867,7 +866,7 @@ pub struct DirectoryDescription { ///

Contains directory limit information for a region.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectoryLimits { ///

The current number of cloud directories in the region.

#[serde(rename = "CloudOnlyDirectoriesCurrentCount")] @@ -920,7 +919,7 @@ pub struct DirectoryVpcSettings { ///

Contains information about the directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DirectoryVpcSettingsDescription { ///

The list of Availability Zones that the directory is in.

#[serde(rename = "AvailabilityZones")] @@ -950,7 +949,7 @@ pub struct DisableRadiusRequest { ///

Contains the results of the DisableRadius operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableRadiusResult {} ///

Contains the inputs for the DisableSso operation.

@@ -971,12 +970,12 @@ pub struct DisableSsoRequest { ///

Contains the results of the DisableSso operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableSsoResult {} ///

Contains information about the domain controllers for a specified directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainController { ///

The Availability Zone where the domain controller is located.

#[serde(rename = "AvailabilityZone")] @@ -1033,7 +1032,7 @@ pub struct EnableRadiusRequest { ///

Contains the results of the EnableRadius operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableRadiusResult {} ///

Contains the inputs for the EnableSso operation.

@@ -1054,12 +1053,12 @@ pub struct EnableSsoRequest { ///

Contains the results of the EnableSso operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableSsoResult {} ///

Information about SNS topic and AWS Directory Service directory associations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventTopic { ///

The date and time of when you associated your directory with the SNS topic.

#[serde(rename = "CreatedDateTime")] @@ -1089,7 +1088,7 @@ pub struct GetDirectoryLimitsRequest {} ///

Contains the results of the GetDirectoryLimits operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDirectoryLimitsResult { ///

A DirectoryLimits object that contains the directory limits for the current region.

#[serde(rename = "DirectoryLimits")] @@ -1107,7 +1106,7 @@ pub struct GetSnapshotLimitsRequest { ///

Contains the results of the GetSnapshotLimits operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSnapshotLimitsResult { ///

A SnapshotLimits object that contains the manual snapshot limits for the specified directory.

#[serde(rename = "SnapshotLimits")] @@ -1130,7 +1129,7 @@ pub struct IpRoute { ///

Information about one or more IP address blocks.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IpRouteInfo { ///

The date and time the address block was added to the directory.

#[serde(rename = "AddedDateTime")] @@ -1174,7 +1173,7 @@ pub struct ListIpRoutesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIpRoutesResult { ///

A list of IpRoutes.

#[serde(rename = "IpRoutesInfo")] @@ -1203,7 +1202,7 @@ pub struct ListLogSubscriptionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLogSubscriptionsResult { ///

A list of active LogSubscription objects for calling the AWS account.

#[serde(rename = "LogSubscriptions")] @@ -1231,7 +1230,7 @@ pub struct ListSchemaExtensionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSchemaExtensionsResult { ///

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to ListSchemaExtensions to retrieve the next set of items.

#[serde(rename = "NextToken")] @@ -1259,7 +1258,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResult { ///

Reserved for future use.

#[serde(rename = "NextToken")] @@ -1273,7 +1272,7 @@ pub struct ListTagsForResourceResult { ///

Represents a log subscription, which tracks real-time data from a chosen log group to a specified destination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogSubscription { ///

Identifier (ID) of the directory that you want to associate with the log subscription.

#[serde(rename = "DirectoryId")] @@ -1291,7 +1290,7 @@ pub struct LogSubscription { ///

Describes the directory owner account details that have been shared to the directory consumer account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OwnerDirectoryDescription { ///

Identifier of the directory owner account.

#[serde(rename = "AccountId")] @@ -1369,7 +1368,7 @@ pub struct RegisterEventTopicRequest { ///

The result of a RegisterEventTopic request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterEventTopicResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1380,7 +1379,7 @@ pub struct RejectSharedDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectSharedDirectoryResult { ///

Identifier of the shared directory in the directory consumer account.

#[serde(rename = "SharedDirectoryId")] @@ -1399,7 +1398,7 @@ pub struct RemoveIpRoutesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveIpRoutesResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1413,7 +1412,7 @@ pub struct RemoveTagsFromResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsFromResourceResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1430,7 +1429,7 @@ pub struct ResetUserPasswordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResetUserPasswordResult {} ///

An object representing the inputs for the RestoreFromSnapshot operation.

@@ -1443,12 +1442,12 @@ pub struct RestoreFromSnapshotRequest { ///

Contains the results of the RestoreFromSnapshot operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreFromSnapshotResult {} ///

Information about a schema extension.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SchemaExtensionInfo { ///

A description of the schema extension.

#[serde(rename = "Description")] @@ -1498,7 +1497,7 @@ pub struct ShareDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ShareDirectoryResult { ///

Identifier of the directory that is stored in the directory consumer account that is shared from the specified directory (DirectoryId).

#[serde(rename = "SharedDirectoryId")] @@ -1519,7 +1518,7 @@ pub struct ShareTarget { ///

Details about the shared directory in the directory owner account for which the share request in the directory consumer account has been accepted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SharedDirectory { ///

The date and time that the shared directory was created.

#[serde(rename = "CreatedDateTime")] @@ -1561,7 +1560,7 @@ pub struct SharedDirectory { ///

Describes a directory snapshot.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Snapshot { ///

The directory identifier.

#[serde(rename = "DirectoryId")] @@ -1591,7 +1590,7 @@ pub struct Snapshot { ///

Contains manual snapshot limit information for a directory.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SnapshotLimits { ///

The current number of manual snapshots of the directory.

#[serde(rename = "ManualSnapshotsCurrentCount")] @@ -1624,7 +1623,7 @@ pub struct StartSchemaExtensionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSchemaExtensionResult { ///

The identifier of the schema extension that will be applied.

#[serde(rename = "SchemaExtensionId")] @@ -1645,7 +1644,7 @@ pub struct Tag { ///

Describes a trust relationship between an AWS Managed Microsoft AD directory and an external domain.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Trust { ///

The date and time that the trust relationship was created.

#[serde(rename = "CreatedDateTime")] @@ -1704,7 +1703,7 @@ pub struct UnshareDirectoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnshareDirectoryResult { ///

Identifier of the directory stored in the directory consumer account that is to be unshared from the specified directory (DirectoryId).

#[serde(rename = "SharedDirectoryId")] @@ -1739,7 +1738,7 @@ pub struct UpdateConditionalForwarderRequest { ///

The result of an UpdateConditionalForwarder request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConditionalForwarderResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1753,7 +1752,7 @@ pub struct UpdateNumberOfDomainControllersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNumberOfDomainControllersResult {} ///

Contains the inputs for the UpdateRadius operation.

@@ -1769,7 +1768,7 @@ pub struct UpdateRadiusRequest { ///

Contains the results of the UpdateRadius operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRadiusResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1784,7 +1783,7 @@ pub struct UpdateTrustRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTrustResult { #[serde(rename = "RequestId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1805,7 +1804,7 @@ pub struct VerifyTrustRequest { ///

Result of a VerifyTrust request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VerifyTrustResult { ///

The unique Trust ID of the trust relationship that was verified.

#[serde(rename = "TrustId")] @@ -5159,10 +5158,7 @@ impl DirectoryServiceClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DirectoryServiceClient { - DirectoryServiceClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5176,10 +5172,14 @@ impl DirectoryServiceClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DirectoryServiceClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DirectoryServiceClient { + DirectoryServiceClient { client, region } } } diff --git a/rusoto/services/dynamodb/Cargo.toml b/rusoto/services/dynamodb/Cargo.toml index 9e83d73b7b1..ca2037c20d1 100644 --- a/rusoto/services/dynamodb/Cargo.toml +++ b/rusoto/services/dynamodb/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_dynamodb" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/dynamodb/README.md b/rusoto/services/dynamodb/README.md index 091783f9448..1a177343431 100644 --- a/rusoto/services/dynamodb/README.md +++ b/rusoto/services/dynamodb/README.md @@ -23,9 +23,16 @@ To use `rusoto_dynamodb` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_dynamodb = "0.40.0" +rusoto_dynamodb = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
diff --git a/rusoto/services/dynamodb/examples/list_tables.rs b/rusoto/services/dynamodb/examples/list_tables.rs index db4efe73962..e36807e2983 100644 --- a/rusoto/services/dynamodb/examples/list_tables.rs +++ b/rusoto/services/dynamodb/examples/list_tables.rs @@ -13,20 +13,18 @@ fn main() { let list_tables_input: ListTablesInput = Default::default(); match client.list_tables(list_tables_input).sync() { - Ok(output) => { - match output.table_names { - Some(table_name_list) => { - println!("Tables in database:"); + Ok(output) => match output.table_names { + Some(table_name_list) => { + println!("Tables in database:"); - for table_name in table_name_list { - println!("{}", table_name); - } - }, - None => println!("No tables in database!"), + for table_name in table_name_list { + println!("{}", table_name); + } } + None => println!("No tables in database!"), }, Err(error) => { println!("Error: {:?}", error); - }, + } } } diff --git a/rusoto/services/dynamodb/src/custom/custom_tests.rs b/rusoto/services/dynamodb/src/custom/custom_tests.rs index 27c87539b49..0f6cce25fef 100644 --- a/rusoto/services/dynamodb/src/custom/custom_tests.rs +++ b/rusoto/services/dynamodb/src/custom/custom_tests.rs @@ -1,8 +1,8 @@ -use crate::generated::{AttributeValue}; +use crate::generated::AttributeValue; #[test] fn attribute_value_default_is_empty() { - let all_default = AttributeValue{ + let all_default = AttributeValue { ..Default::default() }; @@ -12,7 +12,7 @@ fn attribute_value_default_is_empty() { #[test] fn attribute_value_with_blob_contains_only_blob() { - let all_default = AttributeValue{ + let all_default = AttributeValue { b: Some("foo".bytes().collect()), ..Default::default() }; @@ -23,7 +23,7 @@ fn attribute_value_with_blob_contains_only_blob() { #[test] fn attribute_value_with_number_contains_only_number() { - let all_default = AttributeValue{ + let all_default = AttributeValue { n: Some(1234.to_string()), ..Default::default() }; @@ -34,11 +34,11 @@ fn attribute_value_with_number_contains_only_number() { #[test] fn attribute_value_with_binary_set() { - let all_default = AttributeValue{ + let all_default = AttributeValue { bs: Some(vec![ "foo".bytes().collect(), "bar".bytes().collect(), - "baz".bytes().collect() + "baz".bytes().collect(), ]), ..Default::default() }; diff --git a/rusoto/services/dynamodb/src/generated.rs b/rusoto/services/dynamodb/src/generated.rs index f284d584342..df6dd7dc511 100644 --- a/rusoto/services/dynamodb/src/generated.rs +++ b/rusoto/services/dynamodb/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -105,7 +104,7 @@ pub struct AttributeValueUpdate { ///

Represents the properties of the scaling policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingPolicyDescription { ///

The name of the scaling policy.

#[serde(rename = "PolicyName")] @@ -133,7 +132,7 @@ pub struct AutoScalingPolicyUpdate { ///

Represents the autoscaling settings for a global table or global secondary index.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingSettingsDescription { ///

Disabled autoscaling for this global table or global secondary index.

#[serde(rename = "AutoScalingDisabled")] @@ -184,7 +183,7 @@ pub struct AutoScalingSettingsUpdate { ///

Represents the properties of a target tracking scaling policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingTargetTrackingScalingPolicyConfigurationDescription { ///

Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.

#[serde(rename = "DisableScaleIn")] @@ -225,7 +224,7 @@ pub struct AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { ///

Contains the description of the backup created for the table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BackupDescription { ///

Contains the details of the backup created for the table.

#[serde(rename = "BackupDetails")] @@ -243,7 +242,7 @@ pub struct BackupDescription { ///

Contains the details of the backup created for the table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BackupDetails { ///

ARN associated with the backup.

#[serde(rename = "BackupArn")] @@ -272,7 +271,7 @@ pub struct BackupDetails { ///

Contains details for the backup.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BackupSummary { ///

ARN associated with the backup.

#[serde(rename = "BackupArn")] @@ -319,7 +318,7 @@ pub struct BackupSummary { ///

Represents the input of a BatchGetItem operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchGetItemInput { - ///

A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

Each element in the map of items to retrieve consists of the following:

  • ConsistentRead - If true, a strongly consistent read is used; if false (the default), an eventually consistent read is used.

  • ExpressionAttributeNames - One or more substitution tokens for attribute names in the ProjectionExpression parameter. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {"#P":"Percentile"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

  • Keys - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key value. For a composite key, you must provide both the partition key value and the sort key value.

  • ProjectionExpression - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

  • AttributesToGet - This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide.

+ ///

A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

Each element in the map of items to retrieve consists of the following:

  • ConsistentRead - If true, a strongly consistent read is used; if false (the default), an eventually consistent read is used.

  • ExpressionAttributeNames - One or more substitution tokens for attribute names in the ProjectionExpression parameter. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {"#P":"Percentile"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

  • Keys - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key value. For a composite key, you must provide both the partition key value and the sort key value.

  • ProjectionExpression - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes are returned. If any of the requested attributes are not found, they do not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

  • AttributesToGet - This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide.

#[serde(rename = "RequestItems")] pub request_items: ::std::collections::HashMap, #[serde(rename = "ReturnConsumedCapacity")] @@ -329,7 +328,7 @@ pub struct BatchGetItemInput { ///

Represents the output of a BatchGetItem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetItemOutput { ///

The read capacity units consumed by the entire BatchGetItem operation.

Each element consists of:

  • TableName - The table that consumed the provisioned throughput.

  • CapacityUnits - The total number of capacity units consumed.

#[serde(rename = "ConsumedCapacity")] @@ -353,7 +352,7 @@ pub struct BatchGetItemOutput { ///

Represents the input of a BatchWriteItem operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchWriteItemInput { - ///

A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

  • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

    • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

  • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

    • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception.

      If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

+ ///

A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

  • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

    • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

  • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

    • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values are rejected with a ValidationException exception.

      If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

#[serde(rename = "RequestItems")] pub request_items: ::std::collections::HashMap>, #[serde(rename = "ReturnConsumedCapacity")] @@ -367,7 +366,7 @@ pub struct BatchWriteItemInput { ///

Represents the output of a BatchWriteItem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchWriteItemOutput { ///

The capacity units consumed by the entire BatchWriteItem operation.

Each element consists of:

  • TableName - The table that consumed the provisioned throughput.

  • CapacityUnits - The total number of capacity units consumed.

#[serde(rename = "ConsumedCapacity")] @@ -386,7 +385,7 @@ pub struct BatchWriteItemOutput { ///

Contains the details for the read/write capacity mode.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BillingModeSummary { ///

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

  • PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for predictable workloads.

  • PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads.

#[serde(rename = "BillingMode")] @@ -411,7 +410,7 @@ pub struct CancellationReason { ///

Represents the amount of provisioned throughput capacity consumed on a table or an index.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Capacity { ///

The total number of capacity units consumed on a table or an index.

#[serde(rename = "CapacityUnits")] @@ -467,7 +466,7 @@ pub struct ConditionCheck { ///

The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConsumedCapacity { ///

The total number of capacity units consumed by the operation.

#[serde(rename = "CapacityUnits")] @@ -501,7 +500,7 @@ pub struct ConsumedCapacity { ///

Represents the continuous backups and point in time recovery settings on the table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContinuousBackupsDescription { ///

ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED

#[serde(rename = "ContinuousBackupsStatus")] @@ -523,7 +522,7 @@ pub struct CreateBackupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBackupOutput { ///

Contains the details of the backup created for the table.

#[serde(rename = "BackupDetails")] @@ -554,13 +553,13 @@ pub struct CreateGlobalTableInput { ///

The global table name.

#[serde(rename = "GlobalTableName")] pub global_table_name: String, - ///

The regions where the global table needs to be created.

+ ///

The Regions where the global table needs to be created.

#[serde(rename = "ReplicationGroup")] pub replication_group: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGlobalTableOutput { ///

Contains the details of the global table.

#[serde(rename = "GlobalTableDescription")] @@ -586,14 +585,14 @@ pub struct CreateTableInput { #[serde(rename = "BillingMode")] #[serde(skip_serializing_if = "Option::is_none")] pub billing_mode: Option, - ///

@@ -586,14 +585,14 @@ pub struct CreateTableInput {
     #[serde(rename = "BillingMode")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub billing_mode: Option<String>,
-    /// One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index in the array includes the following:
-    ///
-    /// * IndexName - The name of the global secondary index. Must be unique only for this table.
-    /// * KeySchema - Specifies the key schema for the global secondary index.
-    /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:
-    ///   * ProjectionType - One of the following:
-    ///     * KEYS_ONLY - Only the index and primary keys are projected into the index.
-    ///     * INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.
-    ///     * ALL - All of the table attributes are projected into the index.
-    ///   * NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
-    /// * ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units.
+    /// One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index in the array includes the following:
+    ///
+    /// * IndexName - The name of the global secondary index. Must be unique only for this table.
+    /// * KeySchema - Specifies the key schema for the global secondary index.
+    /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:
+    ///   * ProjectionType - One of the following:
+    ///     * KEYS_ONLY - Only the index and primary keys are projected into the index.
+    ///     * INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.
+    ///     * ALL - All of the table attributes are projected into the index.
+    ///   * NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
+    /// * ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units.
     #[serde(rename = "GlobalSecondaryIndexes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub global_secondary_indexes: Option<Vec<GlobalSecondaryIndex>>,

-    /// Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model in the Amazon DynamoDB Developer Guide.
-    ///
-    /// Each KeySchemaElement in the array is composed of:
-    ///
-    /// * AttributeName - The name of this key attribute.
-    /// * KeyType - The role that the key attribute will assume:
-    ///   * HASH - partition key
-    ///   * RANGE - sort key
-    ///
-    /// The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.
-    ///
-    /// The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
-    ///
-    /// For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH.
-    ///
-    /// For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE.
-    ///
-    /// For more information, see Specifying the Primary Key in the Amazon DynamoDB Developer Guide.
+    /// Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model in the Amazon DynamoDB Developer Guide.
+    ///
+    /// Each KeySchemaElement in the array is composed of:
+    ///
+    /// * AttributeName - The name of this key attribute.
+    /// * KeyType - The role that the key attribute will assume:
+    ///   * HASH - partition key
+    ///   * RANGE - sort key
+    ///
+    /// The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from the DynamoDB usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.
+    ///
+    /// The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
+    ///
+    /// For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH.
+    ///
+    /// For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE.
+    ///
+    /// For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "KeySchema")]
     pub key_schema: Vec<KeySchemaElement>,

-    /// One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.
-    ///
-    /// Each local secondary index in the array includes the following:
-    ///
-    /// * IndexName - The name of the local secondary index. Must be unique only for this table.
-    /// * KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table.
-    /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:
-    ///   * ProjectionType - One of the following:
-    ///     * KEYS_ONLY - Only the index and primary keys are projected into the index.
-    ///     * INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.
-    ///     * ALL - All of the table attributes are projected into the index.
-    ///   * NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
+    /// One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.
+    ///
+    /// Each local secondary index in the array includes the following:
+    ///
+    /// * IndexName - The name of the local secondary index. Must be unique only for this table.
+    /// * KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table.
+    /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:
+    ///   * ProjectionType - One of the following:
+    ///     * KEYS_ONLY - Only the index and primary keys are projected into the index.
+    ///     * INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.
+    ///     * ALL - All of the table attributes are projected into the index.
+    ///   * NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
     #[serde(rename = "LocalSecondaryIndexes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub local_secondary_indexes: Option<Vec<LocalSecondaryIndex>>,
@@ -605,7 +604,7 @@ pub struct CreateTableInput {
     #[serde(rename = "SSESpecification")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub sse_specification: Option<SSESpecification>,

-    /// The settings for DynamoDB Streams on the table. These settings consist of:
-    ///
-    /// * StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled (false).
-    /// * StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:
-    ///   * KEYS_ONLY - Only the key attributes of the modified item are written to the stream.
-    ///   * NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.
-    ///   * OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.
-    ///   * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
+    /// The settings for DynamoDB Streams on the table. These settings consist of:
+    ///
+    /// * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or disabled (false).
+    /// * StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:
+    ///   * KEYS_ONLY - Only the key attributes of the modified item are written to the stream.
+    ///   * NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.
+    ///   * OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.
+    ///   * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
     #[serde(rename = "StreamSpecification")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub stream_specification: Option<StreamSpecification>,

@@ -620,7 +619,7 @@ pub struct CreateTableInput {

 /// Represents the output of a CreateTable operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateTableOutput {
     /// Represents the properties of the table.
     #[serde(rename = "TableDescription")]
@@ -663,7 +662,7 @@ pub struct DeleteBackupInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteBackupOutput {
     /// Contains the description of the backup created for the table.
     #[serde(rename = "BackupDescription")]
@@ -682,7 +681,7 @@ pub struct DeleteGlobalSecondaryIndexAction {

 /// Represents the input of a DeleteItem operation.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct DeleteItemInput {

-    /// A condition that must be satisfied in order for a conditional DeleteItem to succeed.
-    ///
-    /// An expression can contain any of the following:
-    ///
-    /// * Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
-    ///
-    ///   These function names are case-sensitive.
-    /// * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
-    /// * Logical operators: AND | OR | NOT
-    ///
-    /// For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
+    /// A condition that must be satisfied in order for a conditional DeleteItem to succeed.
+    ///
+    /// An expression can contain any of the following:
+    ///
+    /// * Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
+    ///
+    ///   These function names are case-sensitive.
+    /// * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+    /// * Logical operators: AND | OR | NOT
+    ///
+    /// For more information about condition expressions, see Condition Expressions in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ConditionExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub condition_expression: Option<String>,
@@ -694,11 +693,11 @@ pub struct DeleteItemInput {
     #[serde(rename = "Expected")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<::std::collections::HashMap<String, ExpectedAttributeValue>>,
-    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
-    ///
-    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
-    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
-    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
-    ///
-    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
-    ///
-    /// * Percentile
-    ///
-    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
-    ///
-    /// * {"#P":"Percentile"}
-    ///
-    /// You could then use this substitution in an expression, as in this example:
-    ///
-    /// * #P = :val
-    ///
-    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
-    ///
-    /// For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+    ///
+    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
+    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
+    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
+    ///
+    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+    ///
+    /// * Percentile
+    ///
+    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
+    ///
+    /// * {"#P":"Percentile"}
+    ///
+    /// You could then use this substitution in an expression, as in this example:
+    ///
+    /// * #P = :val
+    ///
+    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+    ///
+    /// For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeNames")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_names: Option<::std::collections::HashMap<String, String>>,
-    /// One or more values that can be substituted in an expression.
-    ///
-    /// Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
-    ///
-    /// Available | Backordered | Discontinued
-    ///
-    /// You would first need to specify ExpressionAttributeValues as follows:
-    ///
-    /// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
-    ///
-    /// You could then use these values in an expression, such as this:
-    ///
-    /// ProductStatus IN (:avail, :back, :disc)
-    ///
-    /// For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
+    /// One or more values that can be substituted in an expression.
+    ///
+    /// Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
+    ///
+    /// Available | Backordered | Discontinued
+    ///
+    /// You would first need to specify ExpressionAttributeValues as follows:
+    ///
+    /// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
+    ///
+    /// You could then use these values in an expression, such as this:
+    ///
+    /// ProductStatus IN (:avail, :back, :disc)
+    ///
+    /// For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeValues")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_values: Option<::std::collections::HashMap<String, AttributeValue>>,

@@ -723,13 +722,13 @@ pub struct DeleteItemInput {

 /// Represents the output of a DeleteItem operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteItemOutput {
     /// A map of attribute names to AttributeValue objects, representing the item as it appeared before the DeleteItem operation. This map appears in the response only if ReturnValues was specified as ALL_OLD in the request.
     #[serde(rename = "Attributes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub attributes: Option<::std::collections::HashMap<String, AttributeValue>>,
-    /// The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.
+    /// The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Mode in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ConsumedCapacity")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub consumed_capacity: Option<ConsumedCapacity>,
@@ -765,7 +764,7 @@ pub struct DeleteTableInput {

 /// Represents the output of a DeleteTable operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteTableOutput {
     /// Represents the properties of a table.
     #[serde(rename = "TableDescription")]
@@ -775,13 +774,13 @@ pub struct DeleteTableOutput {

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct DescribeBackupInput {
-    /// The ARN associated with the backup.
+    /// The Amazon Resource Name (ARN) associated with the backup.
     #[serde(rename = "BackupArn")]
     pub backup_arn: String,
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeBackupOutput {
     /// Contains the description of the backup created for the table.
     #[serde(rename = "BackupDescription")]
@@ -797,7 +796,7 @@ pub struct DescribeContinuousBackupsInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeContinuousBackupsOutput {
     /// Represents the continuous backups and point in time recovery settings on the table.
     #[serde(rename = "ContinuousBackupsDescription")]
@@ -809,7 +808,7 @@ pub struct DescribeContinuousBackupsOutput {
 pub struct DescribeEndpointsRequest {}

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeEndpointsResponse {
     /// List of endpoints.
     #[serde(rename = "Endpoints")]
@@ -824,7 +823,7 @@ pub struct DescribeGlobalTableInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGlobalTableOutput {
     /// Contains the details of the global table.
     #[serde(rename = "GlobalTableDescription")]
@@ -840,13 +839,13 @@ pub struct DescribeGlobalTableSettingsInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGlobalTableSettingsOutput {
     /// The name of the global table.
     #[serde(rename = "GlobalTableName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub global_table_name: Option<String>,
-    /// The region specific settings for the global table.
+    /// The Region-specific settings for the global table.
     #[serde(rename = "ReplicaSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub replica_settings: Option<Vec<ReplicaSettingsDescription>>,
@@ -858,21 +857,21 @@ pub struct DescribeLimitsInput {}

 /// Represents the output of a DescribeLimits operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeLimitsOutput {
-    /// The maximum total read capacity units that your account allows you to provision across all of your tables in this region.
+    /// The maximum total read capacity units that your account allows you to provision across all of your tables in this Region.
     #[serde(rename = "AccountMaxReadCapacityUnits")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub account_max_read_capacity_units: Option<i64>,
-    /// The maximum total write capacity units that your account allows you to provision across all of your tables in this region.
+    /// The maximum total write capacity units that your account allows you to provision across all of your tables in this Region.
     #[serde(rename = "AccountMaxWriteCapacityUnits")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub account_max_write_capacity_units: Option<i64>,
-    /// The maximum read capacity units that your account allows you to provision for a new table that you are creating in this region, including the read capacity units provisioned for its global secondary indexes (GSIs).
+    /// The maximum read capacity units that your account allows you to provision for a new table that you are creating in this Region, including the read capacity units provisioned for its global secondary indexes (GSIs).
     #[serde(rename = "TableMaxReadCapacityUnits")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub table_max_read_capacity_units: Option<i64>,
-    /// The maximum write capacity units that your account allows you to provision for a new table that you are creating in this region, including the write capacity units provisioned for its global secondary indexes (GSIs).
+    /// The maximum write capacity units that your account allows you to provision for a new table that you are creating in this Region, including the write capacity units provisioned for its global secondary indexes (GSIs).
     #[serde(rename = "TableMaxWriteCapacityUnits")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub table_max_write_capacity_units: Option<i64>,
@@ -888,7 +887,7 @@ pub struct DescribeTableInput {

 /// Represents the output of a DescribeTable operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTableOutput {
     /// The properties of the table.
     #[serde(rename = "Table")]
@@ -904,7 +903,7 @@ pub struct DescribeTimeToLiveInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTimeToLiveOutput {
     ///
     #[serde(rename = "TimeToLiveDescription")]
@@ -914,7 +913,7 @@ pub struct DescribeTimeToLiveOutput {

 /// Endpoint information details.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Endpoint {
     /// IP address of the endpoint.
     #[serde(rename = "Address")]
@@ -975,14 +974,14 @@ pub struct GetItemInput {
     #[serde(rename = "ConsistentRead")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub consistent_read: Option<bool>,

-    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
-    ///
-    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
-    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
-    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
-    ///
-    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
-    ///
-    /// * Percentile
-    ///
-    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
-    ///
-    /// * {"#P":"Percentile"}
-    ///
-    /// You could then use this substitution in an expression, as in this example:
-    ///
-    /// * #P = :val
-    ///
-    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
-    ///
-    /// For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+    ///
+    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
+    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
+    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
+    ///
+    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+    ///
+    /// * Percentile
+    ///
+    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
+    ///
+    /// * {"#P":"Percentile"}
+    ///
+    /// You could then use this substitution in an expression, as in this example:
+    ///
+    /// * #P = :val
+    ///
+    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+    ///
+    /// For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeNames")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_names: Option<::std::collections::HashMap<String, String>>,
     /// A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve.
     ///
     /// For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
     #[serde(rename = "Key")]
     pub key: ::std::collections::HashMap<String, AttributeValue>,
-    /// A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.
-    ///
-    /// If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.
-    ///
-    /// For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+    /// A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.
+    ///
+    /// If no attribute names are specified, then all attributes are returned. If any of the requested attributes are not found, they do not appear in the result.
+    ///
+    /// For more information, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ProjectionExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub projection_expression: Option<String>,

@@ -996,9 +995,9 @@ pub struct GetItemInput {

 /// Represents the output of a GetItem operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetItemOutput {
-    /// The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.
+    /// The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Read/Write Capacity Mode in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ConsumedCapacity")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub consumed_capacity: Option<ConsumedCapacity>,
@@ -1028,7 +1027,7 @@ pub struct GlobalSecondaryIndex {

 /// Represents the properties of a global secondary index.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GlobalSecondaryIndexDescription {
     /// Indicates whether the index is currently backfilling. Backfilling is the process of reading items from the table and determining whether they can be added to the index. (Not all items will qualify: For example, a partition key cannot have any duplicate values.) If an item can be added to the index, DynamoDB will do so. After all items have been processed, the backfilling operation is complete and Backfilling is false.
     ///
     /// For indexes that were created during a CreateTable operation, the Backfilling attribute does not appear in the DescribeTable output.
     #[serde(rename = "Backfilling")]
@@ -1070,7 +1069,7 @@ pub struct GlobalSecondaryIndexDescription {

 /// Represents the properties of a global secondary index for the table when the backup was created.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GlobalSecondaryIndexInfo {
     /// The name of the global secondary index.
     #[serde(rename = "IndexName")]
@@ -1109,7 +1108,7 @@ pub struct GlobalSecondaryIndexUpdate {

 /// Represents the properties of a global table.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GlobalTable {
     /// The global table name.
     #[serde(rename = "GlobalTableName")]
@@ -1123,7 +1122,7 @@ pub struct GlobalTable {

 /// Contains details about the global table.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GlobalTableDescription {
     /// The creation time of the global table.
     #[serde(rename = "CreationDateTime")]
@@ -1165,7 +1164,7 @@ pub struct GlobalTableGlobalSecondaryIndexSettingsUpdate {

 /// Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ItemCollectionMetrics {
     /// The partition key value of the item collection. This value is the same as the partition key value of the item.
     #[serde(rename = "ItemCollectionKey")]
@@ -1179,7 +1178,7 @@ pub struct ItemCollectionMetrics {

 /// Details for the requested item.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ItemResponse {
     /// Map of attribute data consisting of the data type and attribute value.
     #[serde(rename = "Item")]
@@ -1228,7 +1227,7 @@ pub struct ListBackupsInput {
     #[serde(rename = "BackupType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub backup_type: Option<String>,

-    /// LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.
+    /// LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.
     #[serde(rename = "ExclusiveStartBackupArn")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub exclusive_start_backup_arn: Option<String>,
@@ -1251,13 +1250,13 @@ pub struct ListBackupsInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListBackupsOutput {
     /// List of BackupSummary objects.
     #[serde(rename = "BackupSummaries")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub backup_summaries: Option<Vec<BackupSummary>>,
-    /// The ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.
-    ///
-    /// If LastEvaluatedBackupArn is empty, then the last page of results has been processed and there are no more results to be retrieved.
-    ///
-    /// If LastEvaluatedBackupArn is not empty, this may or may not indicate there is more data to be returned. All results are guaranteed to have been returned if and only if no value for LastEvaluatedBackupArn is returned.
+    /// The ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.
+    ///
+    /// If LastEvaluatedBackupArn is empty, then the last page of results has been processed and there are no more results to be retrieved.
+    ///
+    /// If LastEvaluatedBackupArn is not empty, this may or may not indicate that there is more data to be returned. All results are guaranteed to have been returned if and only if no value for LastEvaluatedBackupArn is returned.
     #[serde(rename = "LastEvaluatedBackupArn")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub last_evaluated_backup_arn: Option<String>,

-    /// Lists the global tables in a specific region.
+    /// Lists the global tables in a specific Region.
     #[serde(rename = "RegionName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub region_name: Option<String>,
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListGlobalTablesOutput {
     /// List of global table names.
     #[serde(rename = "GlobalTables")]
@@ -1307,7 +1306,7 @@ pub struct ListTablesInput {

 /// Represents the output of a ListTables operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTablesOutput {
     /// The name of the last table in the current page of results. Use this value as the ExclusiveStartTableName in a new request to obtain the next page of results, until all the table names are returned.
     ///
     /// If you do not receive a LastEvaluatedTableName value in the response, this means that there are no more table names to be retrieved.
     #[serde(rename = "LastEvaluatedTableName")]
@@ -1331,7 +1330,7 @@ pub struct ListTagsOfResourceInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsOfResourceOutput {
     /// If this value is returned, there are additional results to be displayed. To retrieve them, call ListTagsOfResource again, with NextToken set to this value.
     #[serde(rename = "NextToken")]
@@ -1359,7 +1358,7 @@ pub struct LocalSecondaryIndex {

 /// Represents the properties of a local secondary index.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct LocalSecondaryIndexDescription {
     /// The Amazon Resource Name (ARN) that uniquely identifies the index.
     #[serde(rename = "IndexArn")]
@@ -1389,7 +1388,7 @@ pub struct LocalSecondaryIndexDescription {

 /// Represents the properties of a local secondary index for the table when the backup was created.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct LocalSecondaryIndexInfo {
     /// Represents the name of the local secondary index.
     #[serde(rename = "IndexName")]
@@ -1407,7 +1406,7 @@ pub struct LocalSecondaryIndexInfo {

 /// The description of the point in time settings applied to the table.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PointInTimeRecoveryDescription {
     /// Specifies the earliest point in time you can restore your table to. You can restore your table to any point in time during the last 35 days.
     #[serde(rename = "EarliestRestorableDateTime")]
@@ -1457,7 +1456,7 @@ pub struct ProvisionedThroughput {

 /// Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ProvisionedThroughputDescription {
     /// The date and time of the last provisioned throughput decrease for this table.
     #[serde(rename = "LastDecreaseDateTime")]
@@ -1511,7 +1510,7 @@ pub struct Put {

 /// Represents the input of a PutItem operation.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct PutItemInput {

-    /// A condition that must be satisfied in order for a conditional PutItem operation to succeed.
-    ///
-    /// An expression can contain any of the following:
-    ///
-    /// * Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
-    ///
-    ///   These function names are case-sensitive.
-    /// * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
-    /// * Logical operators: AND | OR | NOT
-    ///
-    /// For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
+    /// A condition that must be satisfied in order for a conditional PutItem operation to succeed.
+    ///
+    /// An expression can contain any of the following:
+    ///
+    /// * Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
+    ///
+    ///   These function names are case-sensitive.
+    /// * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+    /// * Logical operators: AND | OR | NOT
+    ///
+    /// For more information on condition expressions, see Condition Expressions in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ConditionExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub condition_expression: Option<String>,
@@ -1523,15 +1522,15 @@ pub struct PutItemInput {
     #[serde(rename = "Expected")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<::std::collections::HashMap<String, ExpectedAttributeValue>>,
-    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
-    ///
-    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
-    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
-    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
-    ///
-    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
-    ///
-    /// * Percentile
-    ///
-    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
-    ///
-    /// * {"#P":"Percentile"}
-    ///
-    /// You could then use this substitution in an expression, as in this example:
-    ///
-    /// * #P = :val
-    ///
-    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
-    ///
-    /// For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+    ///
+    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
+    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
+    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
+    ///
+    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+    ///
+    /// * Percentile
+    ///
+    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
+    ///
+    /// * {"#P":"Percentile"}
+    ///
+    /// You could then use this substitution in an expression, as in this example:
+    ///
+    /// * #P = :val
+    ///
+    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+    ///
+    /// For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeNames")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_names: Option<::std::collections::HashMap<String, String>>,
-    /// One or more values that can be substituted in an expression.
-    ///
-    /// Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
-    ///
-    /// Available | Backordered | Discontinued
-    ///
-    /// You would first need to specify ExpressionAttributeValues as follows:
-    ///
-    /// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
-    ///
-    /// You could then use these values in an expression, such as this:
-    ///
-    /// ProductStatus IN (:avail, :back, :disc)
-    ///
-    /// For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
+    /// One or more values that can be substituted in an expression.
+    ///
+    /// Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
+    ///
+    /// Available | Backordered | Discontinued
+    ///
+    /// You would first need to specify ExpressionAttributeValues as follows:
+    ///
+    /// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
+    ///
+    /// You could then use these values in an expression, such as this:
+    ///
+    /// ProductStatus IN (:avail, :back, :disc)
+    ///
+    /// For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeValues")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_values: Option<::std::collections::HashMap<String, AttributeValue>>,
-    /// A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.
-    ///
-    /// You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.
-    ///
-    /// If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.
-    ///
-    /// For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.
-    ///
-    /// Each element in the Item map is an AttributeValue object.
+    /// A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.
+    ///
+    /// You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.
+    ///
+    /// If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.
+    ///
+    /// For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.
+    ///
+    /// Each element in the Item map is an AttributeValue object.
     #[serde(rename = "Item")]
     pub item: ::std::collections::HashMap<String, AttributeValue>,

     #[serde(rename = "ReturnConsumedCapacity")]
@@ -1552,13 +1551,13 @@ pub struct PutItemInput {

 /// Represents the output of a PutItem operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutItemOutput {
     /// The attribute values as they appeared before the PutItem operation, but only if ReturnValues is specified as ALL_OLD in the request. Each element consists of an attribute name and an attribute value.
     #[serde(rename = "Attributes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub attributes: Option<::std::collections::HashMap<String, AttributeValue>>,
-    /// The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.
+    /// The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Read/Write Capacity Mode in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ConsumedCapacity")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub consumed_capacity: Option<ConsumedCapacity>,
@@ -1591,11 +1590,11 @@ pub struct QueryInput {
     #[serde(rename = "ConsistentRead")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub consistent_read: Option<bool>,
-    /// The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.
-    ///
-    /// The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.
+    /// The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.
+    ///
+    /// The data type for ExclusiveStartKey must be String, Number, or Binary. No set data types are allowed.
     #[serde(rename = "ExclusiveStartKey")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub exclusive_start_key: Option<::std::collections::HashMap<String, AttributeValue>>,

-    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
-    ///
-    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
-    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
-    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
-    ///
-    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
-    ///
-    /// * Percentile
-    ///
-    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
-    ///
-    /// * {"#P":"Percentile"}
-    ///
-    /// You could then use this substitution in an expression, as in this example:
-    ///
-    /// * #P = :val
-    ///
-    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
-    ///
-    /// For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+    /// One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+    ///
+    /// * To access an attribute whose name conflicts with a DynamoDB reserved word.
+    /// * To create a placeholder for repeating occurrences of an attribute name in an expression.
+    /// * To prevent special characters in an attribute name from being misinterpreted in an expression.
+    ///
+    /// Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+    ///
+    /// * Percentile
+    ///
+    /// The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
+    ///
+    /// * {"#P":"Percentile"}
+    ///
+    /// You could then use this substitution in an expression, as in this example:
+    ///
+    /// * #P = :val
+    ///
+    /// Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+    ///
+    /// For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "ExpressionAttributeNames")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expression_attribute_names: Option<::std::collections::HashMap<String, String>>,
@@ -1611,7 +1610,7 @@ pub struct QueryInput {
     #[serde(rename = "IndexName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub index_name: Option<String>,
-    /// The condition that specifies the key value(s) for items to be retrieved by the Query action.
-    ///
-    /// The condition must perform an equality test on a single partition key value.
-    ///
-    /// The condition can optionally perform one of several comparison tests on a single sort key value. This allows Query to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.
-    ///
-    /// The partition key equality test is required, and must be specified in the following format:
-    ///
-    /// partitionKeyName = :partitionkeyval
-    ///
-    /// If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. Following is an example, using the = comparison operator for the sort key:
-    ///
-    /// partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
-    ///
-    /// Valid comparisons for the sort key condition are as follows:
-    ///
-    /// * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
-    /// * sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
-    /// * sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to :sortkeyval.
-    /// * sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval.
-    /// * sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval.
-    /// * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1, and less than or equal to :sortkeyval2.
-    /// * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive.
-    ///
-    /// Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime.
-    ///
-    /// You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:
-    ///
-    /// * Size = :myval
-    ///
-    /// To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:
-    ///
-    /// * #S = :myval
-    ///
-    /// For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.
-    ///
-    /// For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.
+    /// The condition that specifies the key values for items to be retrieved by the Query action.
+    ///
+    /// The condition must perform an equality test on a single partition key value.
+    ///
+    /// The condition can optionally perform one of several comparison tests on a single sort key value. This allows Query to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.
+    ///
+    /// The partition key equality test is required, and must be specified in the following format:
+    ///
+    /// partitionKeyName = :partitionkeyval
+    ///
+    /// If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. Following is an example, using the = comparison operator for the sort key:
+    ///
+    /// partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
+    ///
+    /// Valid comparisons for the sort key condition are as follows:
+    ///
+    /// * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+    /// * sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
+    /// * sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to :sortkeyval.
+    /// * sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval.
+    /// * sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval.
+    /// * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1, and less than or equal to :sortkeyval2.
+    /// * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive.
+    ///
+    /// Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime.
+    ///
+    /// You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:
+    ///
+    /// * Size = :myval
+    ///
+    /// To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:
+    ///
+    /// * #S = :myval
+    ///
+    /// For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.
+    ///
+    /// For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "KeyConditionExpression")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub key_condition_expression: Option<String>,

@@ -1619,7 +1618,7 @@ pub struct QueryInput {
     #[serde(rename = "KeyConditions")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub key_conditions: Option<::std::collections::HashMap<String, Condition>>,
-    /// The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.
+    /// The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.
     #[serde(rename = "Limit")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub limit: Option<i64>,

The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

  • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

  • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

  • COUNT - Returns the number of matching items, rather than the matching items themselves.

  • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.

+ ///

The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

  • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

  • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

  • COUNT - Returns the number of matching items, rather than the matching items themselves.

  • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.
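For example, a count-only query can be sketched as follows (the table, key, and placeholder names are invented); DynamoDB returns only QueryOutput.count, with no item payload:

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, QueryInput};

fn count_matching(values: HashMap<String, AttributeValue>) -> QueryInput {
    QueryInput {
        table_name: "Inventory".to_owned(),
        key_condition_expression: Some("WarehouseId = :wid".to_owned()),
        expression_attribute_values: Some(values),
        select: Some("COUNT".to_owned()), // count only, no attributes
        ..Default::default()
    }
}
```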

#[serde(rename = "Select")] #[serde(skip_serializing_if = "Option::is_none")] pub select: Option, @@ -1649,9 +1648,9 @@ pub struct QueryInput { ///

Represents the output of a Query operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryOutput { - ///

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

+ ///

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

#[serde(rename = "ConsumedCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub consumed_capacity: Option, @@ -1684,7 +1683,7 @@ pub struct Replica { ///

Contains the details of the replica.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicaDescription { ///

The name of the region.

#[serde(rename = "RegionName")] @@ -1694,7 +1693,7 @@ pub struct ReplicaDescription { ///

Represents the properties of a global secondary index.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicaGlobalSecondaryIndexSettingsDescription { ///

The name of the global secondary index. The name must be unique among all other indexes on this table.

#[serde(rename = "IndexName")] @@ -1739,7 +1738,7 @@ pub struct ReplicaGlobalSecondaryIndexSettingsUpdate { ///

Represents the properties of a replica.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicaSettingsDescription { ///

The region name of the replica.

#[serde(rename = "RegionName")] @@ -1814,7 +1813,7 @@ pub struct ReplicaUpdate { ///

Contains details for the restore.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreSummary { ///

Point in time or source backup time.

#[serde(rename = "RestoreDateTime")] @@ -1834,7 +1833,7 @@ pub struct RestoreSummary { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RestoreTableFromBackupInput { - ///

The ARN associated with the backup.

+ ///

The Amazon Resource Name (ARN) associated with the backup.

#[serde(rename = "BackupArn")] pub backup_arn: String, ///

The name of the new table to which the backup must be restored.

@@ -1843,7 +1842,7 @@ pub struct RestoreTableFromBackupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreTableFromBackupOutput { ///

The description of the table created from an existing backup.

#[serde(rename = "TableDescription")] @@ -1870,7 +1869,7 @@ pub struct RestoreTableToPointInTimeInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreTableToPointInTimeOutput { ///

Represents the properties of a table.

#[serde(rename = "TableDescription")] @@ -1880,7 +1879,7 @@ pub struct RestoreTableToPointInTimeOutput { ///

The description of the server-side encryption status on the specified table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SSEDescription { ///

The KMS customer master key (CMK) ARN used for the KMS encryption.

#[serde(rename = "KMSMasterKeyArn")] @@ -1924,7 +1923,7 @@ pub struct ScanInput { #[serde(rename = "ConditionalOperator")] #[serde(skip_serializing_if = "Option::is_none")] pub conditional_operator: Option, - ///

A Boolean value that determines the read consistency model during the scan:

  • If ConsistentRead is false, then the data returned from Scan might not contain the results from other recently completed write operations (PutItem, UpdateItem or DeleteItem).

  • If ConsistentRead is true, then all of the write operations that completed before the Scan began are guaranteed to be contained in the Scan response.

The default setting for ConsistentRead is false.

The ConsistentRead parameter is not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

+ ///

A Boolean value that determines the read consistency model during the scan:

  • If ConsistentRead is false, then the data returned from Scan might not contain the results from other recently completed write operations (PutItem, UpdateItem, or DeleteItem).

  • If ConsistentRead is true, then all of the write operations that completed before the Scan began are guaranteed to be contained in the Scan response.

The default setting for ConsistentRead is false.

The ConsistentRead parameter is not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

#[serde(rename = "ConsistentRead")] #[serde(skip_serializing_if = "Option::is_none")] pub consistent_read: Option, @@ -1932,11 +1931,11 @@ pub struct ScanInput { #[serde(rename = "ExclusiveStartKey")] #[serde(skip_serializing_if = "Option::is_none")] pub exclusive_start_key: Option<::std::collections::HashMap>, - ///

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

  • To access an attribute whose name conflicts with a DynamoDB reserved word.

  • To create a placeholder for repeating occurrences of an attribute name in an expression.

  • To prevent special characters in an attribute name from being misinterpreted in an expression.

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

  • Percentile

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

  • {"#P":"Percentile"}

You could then use this substitution in an expression, as in this example:

  • #P = :val

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

+ ///

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

  • To access an attribute whose name conflicts with a DynamoDB reserved word.

  • To create a placeholder for repeating occurrences of an attribute name in an expression.

  • To prevent special characters in an attribute name from being misinterpreted in an expression.

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

  • Percentile

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

  • {"#P":"Percentile"}

You could then use this substitution in an expression, as in this example:

  • #P = :val

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.
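A sketch of the Percentile workaround applied to a ScanInput (the table name and filter value are assumptions):

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, ScanInput};

fn scan_by_percentile() -> ScanInput {
    // Percentile is a reserved word, so dereference it via #P.
    let mut names = HashMap::new();
    names.insert("#P".to_owned(), "Percentile".to_owned());

    let mut values = HashMap::new();
    values.insert(
        ":val".to_owned(),
        AttributeValue { n: Some("99".to_owned()), ..Default::default() },
    );

    ScanInput {
        table_name: "Stats".to_owned(),
        filter_expression: Some("#P = :val".to_owned()),
        expression_attribute_names: Some(names),
        expression_attribute_values: Some(values),
        ..Default::default()
    }
}
```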

#[serde(rename = "ExpressionAttributeNames")] #[serde(skip_serializing_if = "Option::is_none")] pub expression_attribute_names: Option<::std::collections::HashMap>, - ///

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

+ ///

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
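The ProductStatus example maps directly onto rusoto's AttributeValue type; a sketch, with the table name invented:

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, ScanInput};

fn string_val(s: &str) -> AttributeValue {
    AttributeValue { s: Some(s.to_owned()), ..Default::default() }
}

fn scan_by_status() -> ScanInput {
    let mut values = HashMap::new();
    values.insert(":avail".to_owned(), string_val("Available"));
    values.insert(":back".to_owned(), string_val("Backordered"));
    values.insert(":disc".to_owned(), string_val("Discontinued"));

    ScanInput {
        table_name: "Products".to_owned(),
        filter_expression: Some("ProductStatus IN (:avail, :back, :disc)".to_owned()),
        expression_attribute_values: Some(values),
        ..Default::default()
    }
}
```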

#[serde(rename = "ExpressionAttributeValues")] #[serde(skip_serializing_if = "Option::is_none")] pub expression_attribute_values: Option<::std::collections::HashMap>, @@ -1948,11 +1947,11 @@ pub struct ScanInput { #[serde(rename = "IndexName")] #[serde(skip_serializing_if = "Option::is_none")] pub index_name: Option, - ///

The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.

+ ///

The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Working with Queries in the Amazon DynamoDB Developer Guide.

#[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] pub limit: Option, - ///

A string that identifies one or more attributes to retrieve from the specified table or index. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

+ ///

A string that identifies one or more attributes to retrieve from the specified table or index. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

For more information, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.

#[serde(rename = "ProjectionExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub projection_expression: Option, @@ -1967,7 +1966,7 @@ pub struct ScanInput { #[serde(rename = "Segment")] #[serde(skip_serializing_if = "Option::is_none")] pub segment: Option, - ///

The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

  • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

  • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

  • COUNT - Returns the number of matching items, rather than the matching items themselves.

  • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.

+ ///

The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

  • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index, DynamoDB fetches the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

  • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

  • COUNT - Returns the number of matching items, rather than the matching items themselves.

  • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If you query or scan a local secondary index and request only attributes that are projected into that index, the operation reads only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB fetches each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.

#[serde(rename = "Select")] #[serde(skip_serializing_if = "Option::is_none")] pub select: Option, @@ -1982,7 +1981,7 @@ pub struct ScanInput { ///

Represents the output of a Scan operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScanOutput { ///

The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

#[serde(rename = "ConsumedCapacity")] @@ -2008,7 +2007,7 @@ pub struct ScanOutput { ///

Contains the details of the table when the backup was created.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SourceTableDetails { ///

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

  • PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for predictable workloads.

  • PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads.

#[serde(rename = "BillingMode")] @@ -2045,7 +2044,7 @@ pub struct SourceTableDetails { ///

Contains the details of the features enabled on the table when the backup was created. For example, LSIs, GSIs, streams, TTL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SourceTableFeatureDetails { ///

Represents the GSI properties for the table when the backup was created. It includes the IndexName, KeySchema, Projection and ProvisionedThroughput for the GSIs on the table at the time of backup.

#[serde(rename = "GlobalSecondaryIndexes")] @@ -2084,7 +2083,7 @@ pub struct StreamSpecification { ///

Represents the properties of a table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TableDescription { ///

An array of AttributeDefinition objects. Each of these objects describes one attribute in the table and index key schema.

Each AttributeDefinition object in this array is composed of:

  • AttributeName - The name of the attribute.

  • AttributeType - The data type for the attribute.

#[serde(rename = "AttributeDefinitions")] @@ -2106,7 +2105,7 @@ pub struct TableDescription { #[serde(rename = "ItemCount")] #[serde(skip_serializing_if = "Option::is_none")] pub item_count: Option, - ///

The primary key structure for the table. Each KeySchemaElement consists of:

  • AttributeName - The name of the attribute.

  • KeyType - The role of the attribute:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from DynamoDB' usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

+ ///

The primary key structure for the table. Each KeySchemaElement consists of:

  • AttributeName - The name of the attribute.

  • KeyType - The role of the attribute:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.
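To make the HASH/RANGE roles concrete, this is roughly the KeySchemaElement pair you would supply when creating such a table (the attribute names are hypothetical):

```rust
use rusoto_dynamodb::KeySchemaElement;

fn music_key_schema() -> Vec<KeySchemaElement> {
    vec![
        KeySchemaElement {
            attribute_name: "Artist".to_owned(),
            key_type: "HASH".to_owned(), // partition key
        },
        KeySchemaElement {
            attribute_name: "SongTitle".to_owned(),
            key_type: "RANGE".to_owned(), // sort key
        },
    ]
}
```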

#[serde(rename = "KeySchema")] #[serde(skip_serializing_if = "Option::is_none")] pub key_schema: Option>, @@ -2160,10 +2159,10 @@ pub struct TableDescription { pub table_status: Option, } -///

Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table.

AWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

+///

Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table.

AWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
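A sketch of attaching a tag through TagResourceInput; the ARN and tag pair are placeholders:

```rust
use rusoto_dynamodb::{Tag, TagResourceInput};

fn tag_table() -> TagResourceInput {
    TagResourceInput {
        // Placeholder ARN; substitute the real table ARN.
        resource_arn: "arn:aws:dynamodb:us-east-1:123456789012:table/Products".to_owned(),
        tags: vec![Tag {
            key: "Environment".to_owned(),
            value: "staging".to_owned(),
        }],
    }
}
```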

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { - ///

The key of the tag.Tag keys are case sensitive. Each DynamoDB table can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.

+ ///

The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.

#[serde(rename = "Key")] pub key: String, ///

The value of the tag. Tag values are case-sensitive and can be null.

@@ -2183,25 +2182,25 @@ pub struct TagResourceInput { ///

The description of the Time to Live (TTL) status on the specified table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TimeToLiveDescription { - ///

The name of the Time to Live attribute for items in the table.

+ ///

The name of the TTL attribute for items in the table.

#[serde(rename = "AttributeName")] #[serde(skip_serializing_if = "Option::is_none")] pub attribute_name: Option, - ///

The Time to Live status for the table.

+ ///

The TTL status for the table.

#[serde(rename = "TimeToLiveStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub time_to_live_status: Option, } -///

Represents the settings used to enable or disable Time to Live for the specified table.

+///

Represents the settings used to enable or disable Time to Live (TTL) for the specified table.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TimeToLiveSpecification { - ///

The name of the Time to Live attribute used to store the expiration time for items in the table.

+ ///

The name of the TTL attribute used to store the expiration time for items in the table.

#[serde(rename = "AttributeName")] pub attribute_name: String, - ///

Indicates whether Time To Live is to be enabled (true) or disabled (false) on the table.

+ ///

Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
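Enabling TTL then looks roughly like this; the table and attribute names are assumptions, and the attribute is expected to hold an expiration time in epoch seconds:

```rust
use rusoto_dynamodb::{TimeToLiveSpecification, UpdateTimeToLiveInput};

fn enable_ttl() -> UpdateTimeToLiveInput {
    UpdateTimeToLiveInput {
        table_name: "Sessions".to_owned(),
        time_to_live_specification: TimeToLiveSpecification {
            // Items expire once the epoch-seconds value stored in
            // this attribute is in the past.
            attribute_name: "ExpiresAt".to_owned(),
            enabled: true,
        },
    }
}
```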

#[serde(rename = "Enabled")] pub enabled: bool, } @@ -2220,19 +2219,19 @@ pub struct TransactGetItemsInput { #[serde(rename = "ReturnConsumedCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub return_consumed_capacity: Option, - ///

An ordered array of up to 10 TransactGetItem objects, each of which contains a Get structure.

+ ///

An ordered array of up to 25 TransactGetItem objects, each of which contains a Get structure.
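A sketch of assembling that array with a single Get (the table, key name, and key value are invented):

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, Get, TransactGetItem, TransactGetItemsInput};

fn get_one_item() -> TransactGetItemsInput {
    let mut key = HashMap::new();
    key.insert(
        "OrderId".to_owned(),
        AttributeValue { s: Some("order-42".to_owned()), ..Default::default() },
    );

    TransactGetItemsInput {
        transact_items: vec![TransactGetItem {
            get: Get {
                table_name: "Orders".to_owned(),
                key,
                ..Default::default()
            },
        }],
        ..Default::default()
    }
}
```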

#[serde(rename = "TransactItems")] pub transact_items: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransactGetItemsOutput { ///

If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity objects, one for each table addressed by TransactGetItem objects in the TransactItems parameter. These ConsumedCapacity objects report the read-capacity units consumed by the TransactGetItems call in that table.

#[serde(rename = "ConsumedCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub consumed_capacity: Option>, - ///

An ordered array of up to 10 ItemResponse objects, each of which corresponds to the TransactGetItem object in the same position in the TransactItems array. Each ItemResponse object contains a Map of the name-value pairs that are the projected attributes of the requested item.

If a requested item could not be retrieved, the corresponding ItemResponse object is Null, or if the requested item has no projected attributes, the corresponding ItemResponse object is an empty Map.

+ ///

An ordered array of up to 25 ItemResponse objects, each of which corresponds to the TransactGetItem object in the same position in the TransactItems array. Each ItemResponse object contains a Map of the name-value pairs that are the projected attributes of the requested item.

If a requested item could not be retrieved, the corresponding ItemResponse object is Null, or if the requested item has no projected attributes, the corresponding ItemResponse object is an empty Map.

#[serde(rename = "Responses")] #[serde(skip_serializing_if = "Option::is_none")] pub responses: Option>, @@ -2261,7 +2260,7 @@ pub struct TransactWriteItem { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TransactWriteItemsInput { - ///

Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning that multiple identical calls have the same effect as one single call.

Although multiple identical calls using the same client request token produce the same result on the server (no side effects), the responses to the calls may not be the same. If the ReturnConsumedCapacity parameter is set, then the initial TransactWriteItems call returns the amount of write capacity units consumed in making the changes, and subsequent TransactWriteItems calls with the same client token return the amount of read capacity units consumed in reading the item.

A client request token is valid for 10 minutes after the first request that uses it completes. After 10 minutes, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 10 minutes or the result may not be idempotent.

If you submit a request with the same client token but a change in other parameters within the 10 minute idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.

+ ///

Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning that multiple identical calls have the same effect as one single call.

Although multiple identical calls using the same client request token produce the same result on the server (no side effects), the responses to the calls might not be the same. If the ReturnConsumedCapacity parameter is set, then the initial TransactWriteItems call returns the amount of write capacity units consumed in making the changes. Subsequent TransactWriteItems calls with the same client token return the number of read capacity units consumed in reading the item.

A client request token is valid for 10 minutes after the first request that uses it is completed. After 10 minutes, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 10 minutes, or the result might not be idempotent.

If you submit a request with the same client token but a change in other parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.
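A sketch of a retry-safe write, assuming rusoto's blocking .sync() call style; the token string and item list are placeholders:

```rust
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, TransactWriteItem, TransactWriteItemsInput};

fn write_idempotently(client: &DynamoDbClient, items: Vec<TransactWriteItem>) {
    let input = TransactWriteItemsInput {
        transact_items: items,
        // Reusing this token on a retry within the 10-minute window
        // repeats the original result instead of writing twice.
        client_request_token: Some("order-42-checkout".to_owned()),
        ..Default::default()
    };
    client.transact_write_items(input).sync().expect("transaction failed");
}
```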

#[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, @@ -2272,19 +2271,19 @@ pub struct TransactWriteItemsInput { #[serde(rename = "ReturnItemCollectionMetrics")] #[serde(skip_serializing_if = "Option::is_none")] pub return_item_collection_metrics: Option, - ///

An ordered array of up to 10 TransactWriteItem objects, each of which contains a ConditionCheck, Put, Update, or Delete object. These can operate on items in different tables, but the tables must reside in the same AWS account and region, and no two of them can operate on the same item.

+ ///

An ordered array of up to 25 TransactWriteItem objects, each of which contains a ConditionCheck, Put, Update, or Delete object. These can operate on items in different tables, but the tables must reside in the same AWS account and Region, and no two of them can operate on the same item.

#[serde(rename = "TransactItems")] pub transact_items: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransactWriteItemsOutput { ///

The capacity units consumed by the entire TransactWriteItems operation. The values of the list are ordered according to the ordering of the TransactItems request parameter.

#[serde(rename = "ConsumedCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub consumed_capacity: Option>, - ///

A list of tables that were processed by TransactWriteItems and, for each table, information about any item collections that were affected by individual UpdateItem, PutItem or DeleteItem operations.

+ ///

A list of tables that were processed by TransactWriteItems and, for each table, information about any item collections that were affected by individual UpdateItem, PutItem, or DeleteItem operations.

#[serde(rename = "ItemCollectionMetrics")] #[serde(skip_serializing_if = "Option::is_none")] pub item_collection_metrics: @@ -2293,10 +2292,10 @@ pub struct TransactWriteItemsOutput { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UntagResourceInput { - ///

The Amazon DyanamoDB resource the tags will be removed from. This value is an Amazon Resource Name (ARN).

+ ///

The DynamoDB resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).

#[serde(rename = "ResourceArn")] pub resource_arn: String, - ///

A list of tag keys. Existing tags of the resource whose keys are members of this list will be removed from the Amazon DynamoDB resource.

+ ///

A list of tag keys. Existing tags of the resource whose keys are members of this list will be removed from the DynamoDB resource.

#[serde(rename = "TagKeys")] pub tag_keys: Vec, } @@ -2342,7 +2341,7 @@ pub struct UpdateContinuousBackupsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateContinuousBackupsOutput { ///

Represents the continuous backups and point in time recovery settings on the table.

#[serde(rename = "ContinuousBackupsDescription")] @@ -2366,13 +2365,13 @@ pub struct UpdateGlobalTableInput { ///

The global table name.

#[serde(rename = "GlobalTableName")] pub global_table_name: String, - ///

A list of regions that should be added or removed from the global table.

+ ///

A list of Regions that should be added or removed from the global table.

#[serde(rename = "ReplicaUpdates")] pub replica_updates: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGlobalTableOutput { ///

Contains the details of the global table.

#[serde(rename = "GlobalTableDescription")] @@ -2394,7 +2393,7 @@ pub struct UpdateGlobalTableSettingsInput { ///

The name of the global table

#[serde(rename = "GlobalTableName")] pub global_table_name: String, - ///

AutoScaling settings for managing provisioned write capacity for the global table.

+ ///

Auto scaling settings for managing provisioned write capacity for the global table.

#[serde(rename = "GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate")] #[serde(skip_serializing_if = "Option::is_none")] pub global_table_provisioned_write_capacity_auto_scaling_settings_update: @@ -2403,20 +2402,20 @@ pub struct UpdateGlobalTableSettingsInput { #[serde(rename = "GlobalTableProvisionedWriteCapacityUnits")] #[serde(skip_serializing_if = "Option::is_none")] pub global_table_provisioned_write_capacity_units: Option, - ///

Represents the settings for a global table in a region that will be modified.

+ ///

Represents the settings for a global table in a Region that will be modified.

#[serde(rename = "ReplicaSettingsUpdate")] #[serde(skip_serializing_if = "Option::is_none")] pub replica_settings_update: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGlobalTableSettingsOutput { ///

The name of the global table.

#[serde(rename = "GlobalTableName")] #[serde(skip_serializing_if = "Option::is_none")] pub global_table_name: Option, - ///

The region specific settings for the global table.

+ ///

The Region-specific settings for the global table.

#[serde(rename = "ReplicaSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub replica_settings: Option>, @@ -2429,7 +2428,7 @@ pub struct UpdateItemInput { #[serde(rename = "AttributeUpdates")] #[serde(skip_serializing_if = "Option::is_none")] pub attribute_updates: Option<::std::collections::HashMap>, - ///

A condition that must be satisfied in order for a conditional update to succeed.

An expression can contain any of the following:

  • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

    These function names are case-sensitive.

  • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

  • Logical operators: AND | OR | NOT

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

+ ///

A condition that must be satisfied in order for a conditional update to succeed.

An expression can contain any of the following:

  • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

    These function names are case-sensitive.

  • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

  • Logical operators: AND | OR | NOT

For more information about condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
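For instance (a sketch; the table and attribute names are made up), an update guarded so that it only succeeds when the item already carries a Price attribute:

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, UpdateItemInput};

fn guarded_update(key: HashMap<String, AttributeValue>) -> UpdateItemInput {
    UpdateItemInput {
        table_name: "Products".to_owned(),
        key,
        // Rejected with ConditionalCheckFailedException unless the
        // item already has a Price attribute.
        condition_expression: Some("attribute_exists(Price)".to_owned()),
        ..Default::default()
    }
}
```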

#[serde(rename = "ConditionExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub condition_expression: Option, @@ -2441,11 +2440,11 @@ pub struct UpdateItemInput { #[serde(rename = "Expected")] #[serde(skip_serializing_if = "Option::is_none")] pub expected: Option<::std::collections::HashMap>, - ///

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

  • To access an attribute whose name conflicts with a DynamoDB reserved word.

  • To create a placeholder for repeating occurrences of an attribute name in an expression.

  • To prevent special characters in an attribute name from being misinterpreted in an expression.

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

  • Percentile

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

  • {"#P":"Percentile"}

You could then use this substitution in an expression, as in this example:

  • #P = :val

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

+ ///

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

  • To access an attribute whose name conflicts with a DynamoDB reserved word.

  • To create a placeholder for repeating occurrences of an attribute name in an expression.

  • To prevent special characters in an attribute name from being misinterpreted in an expression.

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

  • Percentile

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.) To work around this, you could specify the following for ExpressionAttributeNames:

  • {"#P":"Percentile"}

You could then use this substitution in an expression, as in this example:

  • #P = :val

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information about expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide.

#[serde(rename = "ExpressionAttributeNames")] #[serde(skip_serializing_if = "Option::is_none")] pub expression_attribute_names: Option<::std::collections::HashMap>, - ///

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

+ ///

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.

#[serde(rename = "ExpressionAttributeValues")] #[serde(skip_serializing_if = "Option::is_none")] pub expression_attribute_values: Option<::std::collections::HashMap>, @@ -2466,7 +2465,7 @@ pub struct UpdateItemInput { ///

The name of the table containing the item to update.

#[serde(rename = "TableName")] pub table_name: String, - ///

An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.

The following action values are available for UpdateExpression.

  • SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val

    SET supports the following functions:

    • if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.

    • list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.

    These function names are case-sensitive.

  • REMOVE - Removes one or more attributes from an item.

  • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

    • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

      If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

      Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

    • If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

      Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.

    The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.

  • DELETE - Deletes an element from a set.

    If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

    The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.

You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.

+ ///

An expression that defines one or more attributes to be updated, the action to be performed on them, and new values for them.

The following action values are available for UpdateExpression.

  • SET - Adds one or more attributes and values to an item. If any of these attributes already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val

    SET supports the following functions:

    • if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.

    • list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.

    These function names are case-sensitive.

  • REMOVE - Removes one or more attributes from an item.

  • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

    • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

      If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

      Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

    • If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

      Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.

    The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.

  • DELETE - Deletes an element from a set.

    If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

    The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.

You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.
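Putting the action vocabulary together, a sketch of an update expression that SETs one attribute and ADDs to a numeric counter (all table, attribute, and token names are invented):

```rust
use std::collections::HashMap;

use rusoto_dynamodb::{AttributeValue, UpdateItemInput};

fn post_reply(key: HashMap<String, AttributeValue>) -> UpdateItemInput {
    let mut values = HashMap::new();
    values.insert(
        ":user".to_owned(),
        AttributeValue { s: Some("alice".to_owned()), ..Default::default() },
    );
    values.insert(
        ":one".to_owned(),
        AttributeValue { n: Some("1".to_owned()), ..Default::default() },
    );

    UpdateItemInput {
        table_name: "Threads".to_owned(),
        key,
        // SET overwrites LastPostedBy; ADD starts Replies at 0 if absent.
        update_expression: Some("SET LastPostedBy = :user ADD Replies :one".to_owned()),
        expression_attribute_values: Some(values),
        ..Default::default()
    }
}
```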

#[serde(rename = "UpdateExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub update_expression: Option, @@ -2474,7 +2473,7 @@ pub struct UpdateItemInput { ///

Represents the output of an UpdateItem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateItemOutput { ///

A map of attribute values as they appear before or after the UpdateItem operation, as determined by the ReturnValues parameter.

The Attributes map is only present if ReturnValues was specified as something other than NONE in the request. Each element represents one attribute.

#[serde(rename = "Attributes")] @@ -2513,7 +2512,7 @@ pub struct UpdateTableInput { #[serde(rename = "SSESpecification")] #[serde(skip_serializing_if = "Option::is_none")] pub sse_specification: Option, - ///

Represents the DynamoDB Streams configuration for the table.

You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.

+ ///

Represents the DynamoDB Streams configuration for the table.

You receive a ResourceInUseException if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.

#[serde(rename = "StreamSpecification")] #[serde(skip_serializing_if = "Option::is_none")] pub stream_specification: Option, @@ -2524,7 +2523,7 @@ pub struct UpdateTableInput { ///

Represents the output of an UpdateTable operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTableOutput { ///

Represents the properties of the table.

#[serde(rename = "TableDescription")] @@ -2544,7 +2543,7 @@ pub struct UpdateTimeToLiveInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTimeToLiveOutput { ///

Represents the output of an UpdateTimeToLive operation.

#[serde(rename = "TimeToLiveSpecification")] @@ -3911,7 +3910,7 @@ pub enum TransactGetItemsError { ProvisionedThroughputExceeded(String), ///

The operation tried to access a nonexistent table or index. The resource might not be specified correctly, or its status might not be ACTIVE.

ResourceNotFound(String), - ///

The entire transaction request was canceled.

DynamoDB cancels a TransactWriteItems request under the following circumstances:

  • A condition in one of the condition expressions is not met.

  • A table in the TransactWriteItems request is in a different account or region.

  • More than one action in the TransactWriteItems operation targets the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • There is a user error, such as an invalid data format.

DynamoDB cancels a TransactGetItems request under the following circumstances:

  • There is an ongoing TransactGetItems operation that conflicts with a concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. In this case the TransactGetItems operation fails with a TransactionCanceledException.

  • A table in the TransactGetItems request is in a different account or region.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items; if an item has no error, it will have the code NONE and a null message.

Cancellation reason codes and possible error messages:

  • No Errors:

    • Code: NONE

    • Message: null

  • Conditional Check Failed:

    • Code: ConditionalCheckFailed

    • Message: The conditional request failed.

  • Item Collection Size Limit Exceeded:

    • Code: ItemCollectionSizeLimitExceeded

    • Message: Collection size exceeded.

  • Transaction Conflict:

    • Code: TransactionConflict

    • Message: Transaction is ongoing for the item.

  • Provisioned Throughput Exceeded:

    • Code: ProvisionedThroughputExceeded

    • Messages:

      • The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.

      • The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned GSI.

  • Throttling Error:

    • Code: ThrottlingError

    • Messages:

      • Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.

        This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.

      • Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.

        This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.

  • Validation Error:

    • Code: ValidationError

    • Messages:

      • One or more parameter values were invalid.

      • The update expression attempted to update the secondary index key beyond allowed size limits.

      • The update expression attempted to update the secondary index key to unsupported type.

      • An operand in the update expression has an incorrect data type.

      • Item size to update has exceeded the maximum allowed size.

      • Number overflow. Attempting to store a number with magnitude larger than supported range.

      • Type mismatch for attribute to update.

      • Nesting Levels have exceeded supported limits.

      • The document path provided in the update expression is invalid for update.

      • The provided expression refers to an attribute that does not exist in the item.

+ ///

The entire transaction request was canceled.

DynamoDB cancels a TransactWriteItems request under the following circumstances:

  • A condition in one of the condition expressions is not met.

  • A table in the TransactWriteItems request is in a different account or region.

  • More than one action in the TransactWriteItems operation targets the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • The aggregate size of the items in the transaction exceeds 4 MBs.

  • There is a user error, such as an invalid data format.

DynamoDB cancels a TransactGetItems request under the following circumstances:

  • There is an ongoing TransactGetItems operation that conflicts with a concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. In this case the TransactGetItems operation fails with a TransactionCanceledException.

  • A table in the TransactGetItems request is in a different account or region.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • The aggregate size of the items in the transaction exceeds 4 MBs.

  • There is a user error, such as an invalid data format.

If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items; if an item has no error, it will have the code NONE and a null message.

Cancellation reason codes and possible error messages:

  • No Errors:

    • Code: NONE

    • Message: null

  • Conditional Check Failed:

    • Code: ConditionalCheckFailed

    • Message: The conditional request failed.

  • Item Collection Size Limit Exceeded:

    • Code: ItemCollectionSizeLimitExceeded

    • Message: Collection size exceeded.

  • Transaction Conflict:

    • Code: TransactionConflict

    • Message: Transaction is ongoing for the item.

  • Provisioned Throughput Exceeded:

    • Code: ProvisionedThroughputExceeded

    • Messages:

      • The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.

      • The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned GSI.

  • Throttling Error:

    • Code: ThrottlingError

    • Messages:

      • Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.

        This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.

      • Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.

        This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.

  • Validation Error:

    • Code: ValidationError

    • Messages:

      • One or more parameter values were invalid.

      • The update expression attempted to update the secondary index key beyond allowed size limits.

      • The update expression attempted to update the secondary index key to unsupported type.

      • An operand in the update expression has an incorrect data type.

      • Item size to update has exceeded the maximum allowed size.

      • Number overflow. Attempting to store a number with magnitude larger than supported range.

      • Type mismatch for attribute to update.

      • Nesting Levels have exceeded supported limits.

      • The document path provided in the update expression is invalid for update.

      • The provided expression refers to an attribute that does not exist in the item.

TransactionCanceled(String), } @@ -3970,7 +3969,7 @@ pub enum TransactWriteItemsError { ProvisionedThroughputExceeded(String), ///
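One way to surface the cancellation reasons listed above is to match on the TransactionCanceled variant, whose string payload carries the message. This is only a sketch: it assumes a rusoto version in which service errors are wrapped in rusoto_core::RusotoError::Service and in which the blocking `.sync()` adapter exists, and the empty input is purely illustrative (a real call populates transact_items):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, TransactGetItemsError, TransactGetItemsInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = TransactGetItemsInput::default(); // illustrative only

    match client.transact_get_items(input).sync() {
        Ok(output) => println!("responses: {:?}", output.responses),
        // The cancellation reason codes and messages described above arrive
        // as the string payload of this variant.
        Err(RusotoError::Service(TransactGetItemsError::TransactionCanceled(reason))) => {
            eprintln!("transaction canceled: {}", reason)
        }
        Err(other) => eprintln!("request failed: {:?}", other),
    }
}
```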

The operation tried to access a nonexistent table or index. The resource might not be specified correctly, or its status might not be ACTIVE.

ResourceNotFound(String), - ///

The entire transaction request was canceled.

DynamoDB cancels a TransactWriteItems request under the following circumstances:

  • A condition in one of the condition expressions is not met.

  • A table in the TransactWriteItems request is in a different account or region.

  • More than one action in the TransactWriteItems operation targets the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • There is a user error, such as an invalid data format.

DynamoDB cancels a TransactGetItems request under the following circumstances:

  • There is an ongoing TransactGetItems operation that conflicts with a concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. In this case the TransactGetItems operation fails with a TransactionCanceledException.

  • A table in the TransactGetItems request is in a different account or region.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items; if an item has no error, it will have the code NONE and a null message.

Cancellation reason codes and possible error messages:

  • No Errors:

    • Code: NONE

    • Message: null

  • Conditional Check Failed:

    • Code: ConditionalCheckFailed

    • Message: The conditional request failed.

  • Item Collection Size Limit Exceeded:

    • Code: ItemCollectionSizeLimitExceeded

    • Message: Collection size exceeded.

  • Transaction Conflict:

    • Code: TransactionConflict

    • Message: Transaction is ongoing for the item.

  • Provisioned Throughput Exceeded:

    • Code: ProvisionedThroughputExceeded

    • Messages:

      • The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.

      • The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned GSI.

  • Throttling Error:

    • Code: ThrottlingError

    • Messages:

      • Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.

        This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.

      • Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.

        This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.

  • Validation Error:

    • Code: ValidationError

    • Messages:

      • One or more parameter values were invalid.

      • The update expression attempted to update the secondary index key beyond allowed size limits.

      • The update expression attempted to update the secondary index key to unsupported type.

      • An operand in the update expression has an incorrect data type.

      • Item size to update has exceeded the maximum allowed size.

      • Number overflow. Attempting to store a number with magnitude larger than supported range.

      • Type mismatch for attribute to update.

      • Nesting Levels have exceeded supported limits.

      • The document path provided in the update expression is invalid for update.

      • The provided expression refers to an attribute that does not exist in the item.

+ ///

The entire transaction request was canceled.

DynamoDB cancels a TransactWriteItems request under the following circumstances:

  • A condition in one of the condition expressions is not met.

  • A table in the TransactWriteItems request is in a different account or region.

  • More than one action in the TransactWriteItems operation targets the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • The aggregate size of the items in the transaction exceeds 4 MBs.

  • There is a user error, such as an invalid data format.

DynamoDB cancels a TransactGetItems request under the following circumstances:

  • There is an ongoing TransactGetItems operation that conflicts with a concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. In this case the TransactGetItems operation fails with a TransactionCanceledException.

  • A table in the TransactGetItems request is in a different account or region.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • The aggregate size of the items in the transaction exceeds 4 MBs.

  • There is a user error, such as an invalid data format.

If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items; if an item has no error, it will have the code NONE and a null message.

Cancellation reason codes and possible error messages:

  • No Errors:

    • Code: NONE

    • Message: null

  • Conditional Check Failed:

    • Code: ConditionalCheckFailed

    • Message: The conditional request failed.

  • Item Collection Size Limit Exceeded:

    • Code: ItemCollectionSizeLimitExceeded

    • Message: Collection size exceeded.

  • Transaction Conflict:

    • Code: TransactionConflict

    • Message: Transaction is ongoing for the item.

  • Provisioned Throughput Exceeded:

    • Code: ProvisionedThroughputExceeded

    • Messages:

      • The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.

      • The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.

        This message is returned when provisioned throughput is exceeded on a provisioned GSI.

  • Throttling Error:

    • Code: ThrottlingError

    • Messages:

      • Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.

        This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.

      • Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.

        This message is returned when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.

  • Validation Error:

    • Code: ValidationError

    • Messages:

      • One or more parameter values were invalid.

      • The update expression attempted to update the secondary index key beyond allowed size limits.

      • The update expression attempted to update the secondary index key to unsupported type.

      • An operand in the update expression has an incorrect data type.

      • Item size to update has exceeded the maximum allowed size.

      • Number overflow. Attempting to store a number with magnitude larger than supported range.

      • Type mismatch for attribute to update.

      • Nesting Levels have exceeded supported limits.

      • The document path provided in the update expression is invalid for update.

      • The provided expression refers to an attribute that does not exist in the item.

TransactionCanceled(String), ///

The transaction with the given request token is already in progress.

TransactionInProgress(String), @@ -4451,31 +4450,31 @@ impl Error for UpdateTimeToLiveError { } /// Trait representing the capabilities of the DynamoDB API. DynamoDB clients implement this trait. pub trait DynamoDb { - ///

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items BatchGetItem will return a ValidationException with the message "Too many items requested for the BatchGetItem call".

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

+ ///

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call."

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.

fn batch_get_item( &self, input: BatchGetItemInput, ) -> RusotoFuture<BatchGetItemOutput, BatchGetItemError>; - ///
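A minimal sketch of a BatchGetItem call through this trait, using invented table and key names and the blocking `.sync()` adapter; per the guidance above, anything left in unprocessed_keys should be retried with exponential backoff:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, BatchGetItemInput, DynamoDb, DynamoDbClient, KeysAndAttributes,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut key = HashMap::new();
    key.insert(
        "pk".to_owned(),
        AttributeValue { s: Some("item-1".to_owned()), ..Default::default() },
    );

    let mut request_items = HashMap::new();
    request_items.insert(
        "my-table".to_owned(), // hypothetical table name
        KeysAndAttributes { keys: vec![key], ..Default::default() },
    );

    let input = BatchGetItemInput { request_items, ..Default::default() };
    match client.batch_get_item(input).sync() {
        Ok(output) => {
            println!("responses: {:?}", output.responses);
            // A partial result leaves the remainder here; retry with backoff.
            if output.unprocessed_keys.map_or(false, |keys| !keys.is_empty()) {
                eprintln!("unprocessed keys remain; retry with exponential backoff");
            }
        }
        Err(e) => eprintln!("batch get failed: {:?}", e),
    }
}
```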

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

+ ///

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

fn batch_write_item( &self, input: BatchWriteItemInput, ) -> RusotoFuture<BatchWriteItemOutput, BatchWriteItemError>; - ///
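The retry-until-drained loop described above might look like the following sketch, again with invented names and a blocking `.sync()` call; the doubling sleep stands in for a production-grade exponential backoff with jitter:

```rust
use std::collections::HashMap;
use std::{thread, time::Duration};

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, BatchWriteItemInput, DynamoDb, DynamoDbClient, PutRequest, WriteRequest,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut item = HashMap::new();
    item.insert(
        "pk".to_owned(),
        AttributeValue { s: Some("item-1".to_owned()), ..Default::default() },
    );

    let mut request_items = HashMap::new();
    request_items.insert(
        "my-table".to_owned(), // hypothetical table name
        vec![WriteRequest { put_request: Some(PutRequest { item }), ..Default::default() }],
    );

    // Resubmit whatever comes back in unprocessed_items, backing off each time.
    let mut backoff = Duration::from_millis(50);
    while !request_items.is_empty() {
        let output = client
            .batch_write_item(BatchWriteItemInput { request_items, ..Default::default() })
            .sync()
            .expect("batch write failed");
        request_items = output.unprocessed_items.unwrap_or_default();
        if !request_items.is_empty() {
            thread::sleep(backoff);
            backoff *= 2;
        }
    }
}
```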

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity

+ ///

Creates a backup for an existing table.

Each time you create an on-demand backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an on-demand backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup might contain data modifications made between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity

fn create_backup( &self, input: CreateBackupInput, ) -> RusotoFuture<CreateBackupOutput, CreateBackupError>; - ///
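Taking an on-demand backup is a small call; a sketch with hypothetical table and backup names, using the blocking `.sync()` adapter:

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{CreateBackupInput, DynamoDb, DynamoDbClient};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = CreateBackupInput {
        table_name: "my-table".to_owned(),             // hypothetical names
        backup_name: "my-table-2018-12-14".to_owned(),
    };
    match client.create_backup(input).sync() {
        Ok(output) => println!("backup created: {:?}", output.backup_details),
        Err(e) => eprintln!("backup failed: {:?}", e),
    }
}
```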

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

+ ///

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

fn create_global_table( &self, input: CreateGlobalTableInput, ) -> RusotoFuture; - ///

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with the same name if you create the tables in different regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.

+ ///

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each Region. That is, you can have two tables with the same name if you create the tables in different Regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.

fn create_table( &self, input: CreateTableInput, @@ -4505,7 +4504,7 @@ pub trait DynamoDb { input: DescribeBackupInput, ) -> RusotoFuture<DescribeBackupOutput, DescribeBackupError>; - ///
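Because CreateTable is asynchronous, a caller typically polls DescribeTable until the status flips to ACTIVE, as the doc above suggests. A sketch with an invented single-attribute hash key and blocking `.sync()` calls:

```rust
use std::{thread, time::Duration};

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeDefinition, CreateTableInput, DescribeTableInput, DynamoDb, DynamoDbClient,
    KeySchemaElement, ProvisionedThroughput,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let input = CreateTableInput {
        table_name: "my-table".to_owned(), // hypothetical name
        attribute_definitions: vec![AttributeDefinition {
            attribute_name: "pk".to_owned(),
            attribute_type: "S".to_owned(),
        }],
        key_schema: vec![KeySchemaElement {
            attribute_name: "pk".to_owned(),
            key_type: "HASH".to_owned(),
        }],
        provisioned_throughput: Some(ProvisionedThroughput {
            read_capacity_units: 5,
            write_capacity_units: 5,
        }),
        ..Default::default()
    };
    client.create_table(input).sync().expect("create failed");

    // Reads and writes are only valid once the table reaches ACTIVE.
    loop {
        let status = client
            .describe_table(DescribeTableInput { table_name: "my-table".to_owned() })
            .sync()
            .expect("describe failed")
            .table
            .and_then(|t| t.table_status);
        if status == Some("ACTIVE".to_owned()) {
            break;
        }
        thread::sleep(Duration::from_secs(1));
    }
}
```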

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

+ ///

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

fn describe_continuous_backups( &self, input: DescribeContinuousBackupsInput, @@ -4521,13 +4520,13 @@ pub trait DynamoDb { input: DescribeGlobalTableInput, ) -> RusotoFuture; - ///

Describes region specific settings for a global table.

+ ///

Describes Region-specific settings for a global table.

fn describe_global_table_settings( &self, input: DescribeGlobalTableSettingsInput, ) -> RusotoFuture; - ///

Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

+ ///

Returns the current provisioned-capacity limits for your AWS account in a Region, both for the Region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular Region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that Region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly. But the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

fn describe_limits(&self) -> RusotoFuture<DescribeLimitsOutput, DescribeLimitsError>; ///
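The five-step procedure above translates fairly directly into code. A sketch, using blocking `.sync()` calls and skipping ListTables pagination, which a real implementation would add:

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DescribeTableInput, DynamoDb, DynamoDbClient, ListTablesInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    // Step 1: account-level provisioned-capacity limits for this Region.
    let limits = client.describe_limits().sync().expect("describe_limits failed");

    // Steps 2-4: aggregate provisioned capacity over all tables and their GSIs.
    let (mut reads, mut writes) = (0i64, 0i64);
    let table_names = client
        .list_tables(ListTablesInput::default())
        .sync()
        .expect("list_tables failed")
        .table_names
        .unwrap_or_default();
    for table_name in table_names {
        let table = client
            .describe_table(DescribeTableInput { table_name })
            .sync()
            .expect("describe_table failed")
            .table
            .expect("missing table description");
        if let Some(pt) = table.provisioned_throughput {
            reads += pt.read_capacity_units.unwrap_or(0);
            writes += pt.write_capacity_units.unwrap_or(0);
        }
        for gsi in table.global_secondary_indexes.unwrap_or_default() {
            if let Some(pt) = gsi.provisioned_throughput {
                reads += pt.read_capacity_units.unwrap_or(0);
                writes += pt.write_capacity_units.unwrap_or(0);
            }
        }
    }

    // Step 5: report current usage against the account limits.
    println!(
        "reads {} of {:?}, writes {} of {:?}",
        reads, limits.account_max_read_capacity_units, writes, limits.account_max_write_capacity_units
    );
}
```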

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

@@ -4545,13 +4544,13 @@ pub trait DynamoDb { ///

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

fn get_item(&self, input: GetItemInput) -> RusotoFuture<GetItemOutput, GetItemError>; - ///
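A sketch of a strongly consistent single-item read, with hypothetical table and key names and a blocking `.sync()` call:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, GetItemInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut key = HashMap::new();
    key.insert(
        "pk".to_owned(),
        AttributeValue { s: Some("item-1".to_owned()), ..Default::default() },
    );

    let input = GetItemInput {
        table_name: "my-table".to_owned(), // hypothetical name
        key,
        consistent_read: Some(true), // opt out of the eventually consistent default
        ..Default::default()
    };

    match client.get_item(input).sync() {
        // `item` is None when nothing matches the key.
        Ok(output) => println!("item: {:?}", output.item),
        Err(e) => eprintln!("get failed: {:?}", e),
    }
}
```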

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of 5 times per second.

+ ///

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive, but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of five times per second.

fn list_backups( &self, input: ListBackupsInput, ) -> RusotoFuture; - ///

Lists all global tables that have a replica in the specified region.

+ ///

Lists all global tables that have a replica in the specified Region.

fn list_global_tables( &self, input: ListGlobalTablesInput, @@ -4569,52 +4568,52 @@ pub trait DynamoDb { input: ListTagsOfResourceInput, ) -> RusotoFuture; - ///

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

+ ///

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

fn put_item(&self, input: PutItemInput) -> RusotoFuture<PutItemOutput, PutItemError>; ///
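The attribute_not_exists guard described above looks like this in practice; table, key, and attribute names are invented, and a blocking `.sync()` call is assumed:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, PutItemInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut item = HashMap::new();
    item.insert(
        "pk".to_owned(),
        AttributeValue { s: Some("item-1".to_owned()), ..Default::default() },
    );

    let input = PutItemInput {
        table_name: "my-table".to_owned(), // hypothetical name
        item,
        // Insert only if no item with this partition key exists yet.
        condition_expression: Some("attribute_not_exists(pk)".to_owned()),
        ..Default::default()
    };

    match client.put_item(input).sync() {
        Ok(_) => println!("item inserted"),
        // A failed guard surfaces as a conditional check failure.
        Err(e) => eprintln!("put rejected: {:?}", e),
    }
}
```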

The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.

A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.

Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.

A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.

A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

fn query(&self, input: QueryInput) -> RusotoFuture<QueryOutput, QueryError>; - ///
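A sketch of the pagination loop implied by LastEvaluatedKey: keep re-issuing the Query with ExclusiveStartKey until the key disappears from the response. Names are invented and a blocking `.sync()` call is assumed:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, QueryInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut values = HashMap::new();
    values.insert(
        ":pk".to_owned(),
        AttributeValue { s: Some("item-1".to_owned()), ..Default::default() },
    );

    let mut exclusive_start_key = None;
    loop {
        let output = client
            .query(QueryInput {
                table_name: "my-table".to_owned(), // hypothetical name
                key_condition_expression: Some("pk = :pk".to_owned()),
                expression_attribute_values: Some(values.clone()),
                exclusive_start_key: exclusive_start_key.take(),
                ..Default::default()
            })
            .sync()
            .expect("query failed");

        for item in output.items.unwrap_or_default() {
            println!("{:?}", item);
        }

        // No LastEvaluatedKey means the result set is complete.
        match output.last_evaluated_key {
            Some(key) => exclusive_start_key = Some(key),
            None => break,
        }
    }
}
```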

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

+ ///

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Amazon CloudWatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

fn restore_table_from_backup( &self, input: RestoreTableFromBackupInput, ) -> RusotoFuture; - ///

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

+ ///

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Amazon CloudWatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

fn restore_table_to_point_in_time( &self, input: RestoreTableToPointInTimeInput, ) -> RusotoFuture; - ///

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

+ ///

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
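
For illustration, a minimal sketch of the pagination loop with rusoto (assuming the `rusoto_core`/`rusoto_dynamodb` crates of this era and their blocking `.sync()` adapter; the table name is hypothetical):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, ScanInput};

fn scan_all_items() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let mut start_key: Option<HashMap<String, AttributeValue>> = None;
    loop {
        let output = client
            .scan(ScanInput {
                table_name: "Music".to_string(), // hypothetical table
                exclusive_start_key: start_key,
                ..Default::default()
            })
            .sync()?;
        for item in output.items.unwrap_or_default() {
            println!("{:?}", item);
        }
        // A LastEvaluatedKey in the response means more pages remain;
        // feed it back as ExclusiveStartKey to continue the scan.
        start_key = output.last_evaluated_key;
        if start_key.is_none() {
            break;
        }
    }
    Ok(())
}
```

A parallel scan would run the same loop once per worker, with Segment and TotalSegments set on each input.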

fn scan(&self, input: ScanInput) -> RusotoFuture<ScanOutput, ScanError>; - ///

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

+ ///

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
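
A minimal call sketch (the resource ARN and tag values are hypothetical; `.sync()` blocks on the returned RusotoFuture):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, Tag, TagResourceInput};

fn tag_table() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    client
        .tag_resource(TagResourceInput {
            // Hypothetical table ARN.
            resource_arn: "arn:aws:dynamodb:us-east-1:123456789012:table/Music".to_string(),
            tags: vec![Tag {
                key: "CostCenter".to_string(),
                value: "research".to_string(),
            }],
        })
        .sync()?;
    Ok(())
}
```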

fn tag_resource(&self, input: TagResourceInput) -> RusotoFuture<(), TagResourceError>; - ///

TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and region. A TransactGetItems call can contain up to 10 TransactGetItem objects, each of which contains a Get structure that specifies an item to retrieve from a table in the account and region. A call to TransactGetItems cannot retrieve items from tables in more than one AWS account or region.

DynamoDB rejects the entire TransactGetItems request if any of the following is true:

  • A conflicting operation is in the process of updating an item to be read.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

+ ///

TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems call can contain up to 25 TransactGetItem objects, each of which contains a Get structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems cannot retrieve items from tables in more than one AWS account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB.

All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction with up to 4 MB of data, except the following AWS Regions:

  • China (Beijing)

  • China (Ningxia)

The China (Beijing) and China (Ningxia) Regions support up to 10 items per transaction with up to 4 MB of data.

DynamoDB rejects the entire TransactGetItems request if any of the following is true:

  • A conflicting operation is in the process of updating an item to be read.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

  • The aggregate size of the items in the transaction cannot exceed 4 MB.
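
As a sketch of how the Get structures compose into a transactional read (table names and key attributes are hypothetical):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, DynamoDb, DynamoDbClient, Get, TransactGetItem, TransactGetItemsInput,
};

// Build a single-attribute key; "Id" is a hypothetical key attribute.
fn key_of(id: &str) -> HashMap<String, AttributeValue> {
    let mut key = HashMap::new();
    key.insert(
        "Id".to_string(),
        AttributeValue { s: Some(id.to_string()), ..Default::default() },
    );
    key
}

fn read_pair_atomically() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let output = client
        .transact_get_items(TransactGetItemsInput {
            transact_items: vec![
                TransactGetItem {
                    get: Get {
                        table_name: "Orders".to_string(),
                        key: key_of("order-1"),
                        ..Default::default()
                    },
                },
                TransactGetItem {
                    get: Get {
                        table_name: "Customers".to_string(),
                        key: key_of("cust-9"),
                        ..Default::default()
                    },
                },
            ],
            ..Default::default()
        })
        .sync()?;
    // Responses come back in the same order as the requested items.
    println!("{:?}", output.responses);
    Ok(())
}
```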

fn transact_get_items( &self, input: TransactGetItemsInput, ) -> RusotoFuture<TransactGetItemsOutput, TransactGetItemsError>; - ///

TransactWriteItems is a synchronous write operation that groups up to 10 action requests. These actions can target items in different tables, but not in different AWS accounts or regions, and no two actions can target the same item. For example, you cannot both ConditionCheck and Update the same item.

The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:

  • Put  —   Initiates a PutItem operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • Update  —   Initiates an UpdateItem operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • Delete  —   Initiates a DeleteItem operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • ConditionCheck  —   Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

DynamoDB rejects the entire TransactWriteItems request if any of the following is true:

  • A condition in one of the condition expressions is not met.

  • A conflicting operation is in the process of updating the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (bigger than 400 KB), a Local Secondary Index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • There is a user error, such as an invalid data format.

+ ///

TransactWriteItems is a synchronous write operation that groups up to 25 action requests. These actions can target items in different tables, but not in different AWS accounts or Regions, and no two actions can target the same item. For example, you cannot both ConditionCheck and Update the same item. The aggregate size of the items in the transaction cannot exceed 4 MB.

All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction with up to 4 MB of data, except the following AWS Regions:

  • China (Beijing)

  • China (Ningxia)

The China (Beijing) and China (Ningxia) Regions support up to 10 items per transaction with up to 4 MB of data.

The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:

  • Put  —   Initiates a PutItem operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • Update  —   Initiates an UpdateItem operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • Delete  —   Initiates a DeleteItem operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • ConditionCheck  —   Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.

DynamoDB rejects the entire TransactWriteItems request if any of the following is true:

  • A condition in one of the condition expressions is not met.

  • An ongoing operation is in the process of updating the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • The aggregate size of the items in the transaction exceeds 4 MB.

  • There is a user error, such as an invalid data format.
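
Putting the action objects above together, a hedged sketch of a two-action transaction: a conditional Put paired with a ConditionCheck on a second table (tables, keys, and the token are hypothetical):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, ConditionCheck, DynamoDb, DynamoDbClient, Put, TransactWriteItem,
    TransactWriteItemsInput,
};

fn place_order() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let attr = |v: &str| AttributeValue { s: Some(v.to_string()), ..Default::default() };

    let mut order = HashMap::new();
    order.insert("Id".to_string(), attr("order-1"));

    let mut customer_key = HashMap::new();
    customer_key.insert("Id".to_string(), attr("cust-9"));

    client
        .transact_write_items(TransactWriteItemsInput {
            transact_items: vec![
                // Write the order only if no item with this key exists yet ...
                TransactWriteItem {
                    put: Some(Put {
                        table_name: "Orders".to_string(),
                        item: order,
                        condition_expression: Some("attribute_not_exists(Id)".to_string()),
                        ..Default::default()
                    }),
                    ..Default::default()
                },
                // ... and only if the customer exists (checked, not modified).
                TransactWriteItem {
                    condition_check: Some(ConditionCheck {
                        table_name: "Customers".to_string(),
                        key: customer_key,
                        condition_expression: "attribute_exists(Id)".to_string(),
                        ..Default::default()
                    }),
                    ..Default::default()
                },
            ],
            // Optional idempotency token so a retried call is not applied twice.
            client_request_token: Some("order-1-attempt".to_string()),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```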

fn transact_write_items( &self, input: TransactWriteItemsInput, ) -> RusotoFuture<TransactWriteItemsOutput, TransactWriteItemsError>; - ///

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

+ ///

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to five times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

fn untag_resource(&self, input: UntagResourceInput) -> RusotoFuture<(), UntagResourceError>; - ///

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days..

+ ///

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
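
A minimal sketch of turning point in time recovery on (the table name is hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{
    DynamoDb, DynamoDbClient, PointInTimeRecoverySpecification, UpdateContinuousBackupsInput,
};

fn enable_pitr() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let output = client
        .update_continuous_backups(UpdateContinuousBackupsInput {
            table_name: "Music".to_string(), // hypothetical table
            point_in_time_recovery_specification: PointInTimeRecoverySpecification {
                point_in_time_recovery_enabled: true,
            },
        })
        .sync()?;
    // The call echoes back the current ContinuousBackupsDescription.
    println!("{:?}", output.continuous_backups_description);
    Ok(())
}
```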

fn update_continuous_backups( &self, input: UpdateContinuousBackupsInput, ) -> RusotoFuture<UpdateContinuousBackupsOutput, UpdateContinuousBackupsError>; - ///

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, and must have DynamoDB Streams enabled and must have same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

+ ///

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.
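
A sketch of adding one replica Region, assuming the ReplicaUpdate/CreateReplicaAction shapes in rusoto_dynamodb (the table name and Region are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{
    CreateReplicaAction, DynamoDb, DynamoDbClient, ReplicaUpdate, UpdateGlobalTableInput,
};

fn add_replica() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    client
        .update_global_table(UpdateGlobalTableInput {
            global_table_name: "Music".to_string(), // hypothetical global table
            replica_updates: vec![ReplicaUpdate {
                // One request per add or remove keeps things simple, as advised above.
                create: Some(CreateReplicaAction { region_name: "eu-west-1".to_string() }),
                ..Default::default()
            }],
        })
        .sync()?;
    Ok(())
}
```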

fn update_global_table( &self, input: UpdateGlobalTableInput, @@ -4632,13 +4631,13 @@ pub trait DynamoDb { input: UpdateItemInput, ) -> RusotoFuture<UpdateItemOutput, UpdateItemError>; - ///

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

+ ///

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable DynamoDB Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
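
A minimal sketch of one such operation, raising provisioned throughput (table name and capacity values are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, ProvisionedThroughput, UpdateTableInput};

fn raise_throughput() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    client
        .update_table(UpdateTableInput {
            table_name: "Music".to_string(), // hypothetical table
            provisioned_throughput: Some(ProvisionedThroughput {
                read_capacity_units: 10,
                write_capacity_units: 10,
            }),
            ..Default::default()
        })
        .sync()?;
    // The table status is now UPDATING; poll DescribeTable until it is ACTIVE again.
    Ok(())
}
```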

fn update_table( &self, input: UpdateTableInput, ) -> RusotoFuture<UpdateTableOutput, UpdateTableError>; - ///

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

+ ///

The UpdateTimeToLive method enables or disables Time to Live (TTL) for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. It can take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any local secondary index and global secondary index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.
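
A sketch that enables TTL and computes an epoch-seconds expiry for items (the table and attribute names are hypothetical):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, TimeToLiveSpecification, UpdateTimeToLiveInput};

fn enable_ttl() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    client
        .update_time_to_live(UpdateTimeToLiveInput {
            table_name: "Sessions".to_string(), // hypothetical table
            time_to_live_specification: TimeToLiveSpecification {
                attribute_name: "ExpiresAt".to_string(), // hypothetical TTL attribute
                enabled: true,
            },
        })
        .sync()?;

    // Items then carry a Number attribute holding an epoch-seconds expiry,
    // e.g. one hour from now:
    let expires_at = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() + 3600;
    println!("store ExpiresAt = {}", expires_at);
    Ok(())
}
```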

fn update_time_to_live( &self, input: UpdateTimeToLiveInput,
@@ -4656,10 +4655,7 @@ impl DynamoDbClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> DynamoDbClient {
-        DynamoDbClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with<P, D>(
@@ -4673,15 +4669,19 @@ impl DynamoDbClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        DynamoDbClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> DynamoDbClient {
+        DynamoDbClient { client, region }
     }
 }

 impl DynamoDb for DynamoDbClient {
-    ///

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items BatchGetItem will return a ValidationException with the message "Too many items requested for the BatchGetItem call".

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

+ ///

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call."

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
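
A sketch of the UnprocessedKeys retry loop described above (the table and key are hypothetical; a production loop would add exponential backoff between rounds):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, BatchGetItemInput, DynamoDb, DynamoDbClient, KeysAndAttributes,
};

fn batch_get() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut key = HashMap::new();
    key.insert(
        "Id".to_string(), // hypothetical key attribute
        AttributeValue { s: Some("song-1".to_string()), ..Default::default() },
    );

    let mut request_items = HashMap::new();
    request_items.insert(
        "Music".to_string(), // hypothetical table
        KeysAndAttributes { keys: vec![key], ..Default::default() },
    );

    loop {
        let output = client
            .batch_get_item(BatchGetItemInput { request_items, ..Default::default() })
            .sync()?;
        println!("{:?}", output.responses);
        // Anything the service could not read this round comes back in
        // UnprocessedKeys; resubmit it (ideally with exponential backoff).
        match output.unprocessed_keys {
            Some(keys) if !keys.is_empty() => request_items = keys,
            _ => break,
        }
    }
    Ok(())
}
```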

fn batch_get_item( &self, input: BatchGetItemInput, @@ -4710,7 +4710,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

+ ///

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.
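
The UnprocessedItems loop described above, sketched with rusoto (the table and item are hypothetical; backoff between rounds is omitted for brevity):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, BatchWriteItemInput, DynamoDb, DynamoDbClient, PutRequest, WriteRequest,
};

fn batch_write() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut item = HashMap::new();
    item.insert(
        "Id".to_string(), // hypothetical key attribute
        AttributeValue { s: Some("song-1".to_string()), ..Default::default() },
    );

    let mut request_items = HashMap::new();
    request_items.insert(
        "Music".to_string(), // hypothetical table
        vec![WriteRequest { put_request: Some(PutRequest { item }), ..Default::default() }],
    );

    // Loop until everything is processed, resubmitting UnprocessedItems.
    while !request_items.is_empty() {
        let output = client
            .batch_write_item(BatchWriteItemInput { request_items, ..Default::default() })
            .sync()?;
        request_items = output.unprocessed_items.unwrap_or_default();
        // A real application should back off exponentially between rounds here.
    }
    Ok(())
}
```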

fn batch_write_item( &self, input: BatchWriteItemInput, @@ -4739,7 +4739,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity

+ ///

Creates a backup for an existing table.

Each time you create an on-demand backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an on-demand backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup might contain data modifications made between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity
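
A minimal sketch (the table and backup names are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{CreateBackupInput, DynamoDb, DynamoDbClient};

fn backup_table() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let output = client
        .create_backup(CreateBackupInput {
            backup_name: "Music-2019-05-01".to_string(), // hypothetical backup name
            table_name: "Music".to_string(),             // hypothetical table
        })
        .sync()?;
    // The backup itself is created asynchronously; its ARN is available immediately.
    println!("{:?}", output.backup_details.map(|d| d.backup_arn));
    Ok(())
}
```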

fn create_backup( &self, input: CreateBackupInput, @@ -4768,7 +4768,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

+ ///

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables' replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

fn create_global_table( &self, input: CreateGlobalTableInput, @@ -4797,7 +4797,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.

+ ///

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each Region. That is, you can have two tables with the same name if you create the tables in different Regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.
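
A sketch creating a table with a single string partition key (the names and capacity values are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeDefinition, CreateTableInput, DynamoDb, DynamoDbClient, KeySchemaElement,
    ProvisionedThroughput,
};

fn create_music_table() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);
    let output = client
        .create_table(CreateTableInput {
            table_name: "Music".to_string(), // hypothetical table
            attribute_definitions: vec![AttributeDefinition {
                attribute_name: "Id".to_string(),
                attribute_type: "S".to_string(), // string-typed partition key
            }],
            key_schema: vec![KeySchemaElement {
                attribute_name: "Id".to_string(),
                key_type: "HASH".to_string(),
            }],
            provisioned_throughput: Some(ProvisionedThroughput {
                read_capacity_units: 5,
                write_capacity_units: 5,
            }),
            ..Default::default()
        })
        .sync()?;
    // TableStatus starts as CREATING; poll DescribeTable until it is ACTIVE.
    println!("{:?}", output.table_description.and_then(|t| t.table_status));
    Ok(())
}
```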

fn create_table( &self, input: CreateTableInput, @@ -4942,7 +4942,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

+ ///

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

fn describe_continuous_backups( &self, input: DescribeContinuousBackupsInput, @@ -5026,7 +5026,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Describes region specific settings for a global table.

+ ///

Describes Region-specific settings for a global table.

fn describe_global_table_settings( &self, input: DescribeGlobalTableSettingsInput, @@ -5055,7 +5055,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

+ ///

Returns the current provisioned-capacity limits for your AWS account in a Region, both for the Region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are per-table limits that apply when you create a table there. For more information, see the Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular Region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that Region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly. But the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.
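
A condensed sketch of steps 1 through 5 above (ListTables pagination is elided, and field names follow rusoto_dynamodb's generated types):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DescribeTableInput, DynamoDb, DynamoDbClient, ListTablesInput};

fn report_capacity_headroom() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);

    // Step 1: fetch the account-level limits for this Region.
    let limits = client.describe_limits().sync()?;

    // Step 2: zeroed aggregates for read and write capacity.
    let (mut read_total, mut write_total) = (0i64, 0i64);

    // Step 3: list the tables (a complete version would follow
    // LastEvaluatedTableName to page through more than 100 tables).
    let tables = client
        .list_tables(ListTablesInput::default())
        .sync()?
        .table_names
        .unwrap_or_default();

    // Step 4: add each table's provisioned capacity, plus that of its GSIs.
    for name in tables {
        let table = client
            .describe_table(DescribeTableInput { table_name: name })
            .sync()?
            .table
            .unwrap_or_default();
        if let Some(pt) = table.provisioned_throughput {
            read_total += pt.read_capacity_units.unwrap_or(0);
            write_total += pt.write_capacity_units.unwrap_or(0);
        }
        for gsi in table.global_secondary_indexes.unwrap_or_default() {
            if let Some(pt) = gsi.provisioned_throughput {
                read_total += pt.read_capacity_units.unwrap_or(0);
                write_total += pt.write_capacity_units.unwrap_or(0);
            }
        }
    }

    // Step 5: report usage against the account limits.
    println!(
        "provisioned {}/{} RCU, {}/{} WCU",
        read_total,
        limits.account_max_read_capacity_units.unwrap_or(0),
        write_total,
        limits.account_max_write_capacity_units.unwrap_or(0),
    );
    Ok(())
}
```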

fn describe_limits(&self) -> RusotoFuture<DescribeLimitsOutput, DescribeLimitsError> { let mut request = SignedRequest::new("POST", "dynamodb", &self.region, "/"); @@ -5163,7 +5163,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of 5 times per second.

+ ///

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive, but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of five times per second.

fn list_backups( &self, input: ListBackupsInput, @@ -5192,7 +5192,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Lists all global tables that have a replica in the specified region.

+ ///

Lists all global tables that have a replica in the specified Region.

fn list_global_tables( &self, input: ListGlobalTablesInput, @@ -5279,7 +5279,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

+ ///

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.
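
A sketch of the conditional put described above (the table and key attribute are hypothetical):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, PutItemInput};

fn put_if_absent() -> Result<(), Box<dyn std::error::Error>> {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut item = HashMap::new();
    item.insert(
        "Id".to_string(), // hypothetical partition key
        AttributeValue { s: Some("song-1".to_string()), ..Default::default() },
    );

    client
        .put_item(PutItemInput {
            table_name: "Music".to_string(), // hypothetical table
            item,
            // Reject the write (conditional check failure) if an item with this
            // partition key already exists, instead of silently replacing it.
            condition_expression: Some("attribute_not_exists(Id)".to_string()),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```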

fn put_item(&self, input: PutItemInput) -> RusotoFuture<PutItemOutput, PutItemError> { let mut request = SignedRequest::new("POST", "dynamodb", &self.region, "/"); @@ -5329,7 +5329,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

+ ///

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Amazon CloudWatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

fn restore_table_from_backup( &self, input: RestoreTableFromBackupInput, @@ -5357,7 +5357,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

+ ///

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Amazon CloudWatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

fn restore_table_to_point_in_time( &self, input: RestoreTableToPointInTimeInput, @@ -5386,7 +5386,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

+ ///

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
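
The pagination contract above (feed each response's `LastEvaluatedKey` back as `ExclusiveStartKey` until it is absent) looks roughly like this with the generated types; the table name is hypothetical:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{AttributeValue, DynamoDb, DynamoDbClient, ScanInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let mut start_key: Option<HashMap<String, AttributeValue>> = None;
    let mut items = Vec::new();

    // Each page holds at most 1 MB of data (or `limit` items);
    // keep going while LastEvaluatedKey is present.
    loop {
        let input = ScanInput {
            table_name: "Orders".to_string(), // hypothetical table
            exclusive_start_key: start_key.take(),
            ..Default::default()
        };
        let output = client.scan(input).sync().expect("scan failed");
        items.extend(output.items.unwrap_or_default());
        match output.last_evaluated_key {
            Some(key) => start_key = Some(key),
            None => break,
        }
    }
    println!("scanned {} items", items.len());
}
```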

fn scan(&self, input: ScanInput) -> RusotoFuture<ScanOutput, ScanError> { let mut request = SignedRequest::new("POST", "dynamodb", &self.region, "/"); @@ -5411,7 +5411,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

+ ///

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
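
A minimal sketch of tagging a table through the generated client; the ARN and tag values are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, Tag, TagResourceInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = TagResourceInput {
        // Hypothetical table ARN.
        resource_arn: "arn:aws:dynamodb:us-east-1:111122223333:table/Orders".to_string(),
        tags: vec![Tag {
            key: "CostCenter".to_string(),
            value: "analytics".to_string(),
        }],
    };
    if let Err(e) = client.tag_resource(input).sync() {
        eprintln!("tagging failed: {}", e);
    }
}
```

UntagResource, further below, is the symmetric call: it takes the same resource ARN plus a list of tag keys to remove.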

fn tag_resource(&self, input: TagResourceInput) -> RusotoFuture<(), TagResourceError> { let mut request = SignedRequest::new("POST", "dynamodb", &self.region, "/"); @@ -5434,7 +5434,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and region. A TransactGetItems call can contain up to 10 TransactGetItem objects, each of which contains a Get structure that specifies an item to retrieve from a table in the account and region. A call to TransactGetItems cannot retrieve items from tables in more than one AWS account or region.

DynamoDB rejects the entire TransactGetItems request if any of the following is true:

  • A conflicting operation is in the process of updating an item to be read.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

+ ///

TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems call can contain up to 25 TransactGetItem objects, each of which contains a Get structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems cannot retrieve items from tables in more than one AWS account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB.

All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction with up to 4 MB of data, except the following AWS Regions:

  • China (Beijing)

  • China (Ningxia)

The China (Beijing) and China (Ningxia) Regions support up to 10 items per transaction with up to 4 MB of data.

DynamoDB rejects the entire TransactGetItems request if any of the following is true:

  • A conflicting operation is in the process of updating an item to be read.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • There is a user error, such as an invalid data format.

  • The aggregate size of the items in the transaction exceeds 4 MB.
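
A sketch of a single-item transactional read with the generated types (table, key, and attribute names are hypothetical); a real request can carry up to 25 `TransactGetItem` entries:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, DynamoDb, DynamoDbClient, Get, TransactGetItem, TransactGetItemsInput,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    // Primary key of the item to read.
    let mut key = HashMap::new();
    key.insert(
        "OrderId".to_string(),
        AttributeValue {
            s: Some("order-123".to_string()),
            ..Default::default()
        },
    );

    let input = TransactGetItemsInput {
        transact_items: vec![TransactGetItem {
            get: Get {
                key,
                table_name: "Orders".to_string(),
                ..Default::default()
            },
        }],
        ..Default::default()
    };

    match client.transact_get_items(input).sync() {
        Ok(output) => println!("{:?}", output.responses),
        Err(e) => eprintln!("transaction rejected: {}", e),
    }
}
```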

fn transact_get_items( &self, input: TransactGetItemsInput, @@ -5463,7 +5463,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

TransactWriteItems is a synchronous write operation that groups up to 10 action requests. These actions can target items in different tables, but not in different AWS accounts or regions, and no two actions can target the same item. For example, you cannot both ConditionCheck and Update the same item.

The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:

  • Put  —   Initiates a PutItem operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • Update  —   Initiates an UpdateItem operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • Delete  —   Initiates a DeleteItem operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

  • ConditionCheck  —   Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether or not to retrieve the item's attributes if the condition is not met.

DynamoDB rejects the entire TransactWriteItems request if any of the following is true:

  • A condition in one of the condition expressions is not met.

  • A conflicting operation is in the process of updating the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (bigger than 400 KB), a Local Secondary Index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • There is a user error, such as an invalid data format.

+ ///

TransactWriteItems is a synchronous write operation that groups up to 25 action requests. These actions can target items in different tables, but not in different AWS accounts or Regions, and no two actions can target the same item. For example, you cannot both ConditionCheck and Update the same item. The aggregate size of the items in the transaction cannot exceed 4 MB.

All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction with up to 4 MB of data, except the following AWS Regions:

  • China (Beijing)

  • China (Ningxia)

The China (Beijing) and China (Ningxia) Regions support up to 10 items per transaction with up to 4 MB of data.

The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:

  • Put  —   Initiates a PutItem operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • Update  —   Initiates an UpdateItem operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • Delete  —   Initiates a DeleteItem operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.

  • ConditionCheck  —   Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.

DynamoDB rejects the entire TransactWriteItems request if any of the following is true:

  • A condition in one of the condition expressions is not met.

  • An ongoing operation is in the process of updating the same item.

  • There is insufficient provisioned capacity for the transaction to be completed.

  • An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.

  • The aggregate size of the items in the transaction exceeds 4 MB.

  • There is a user error, such as an invalid data format.
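
For instance, a conditional Put expressed as a one-action transaction might look like the following sketch (names are hypothetical; a real request can mix up to 25 Put, Update, Delete, and ConditionCheck actions):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_dynamodb::{
    AttributeValue, DynamoDb, DynamoDbClient, Put, TransactWriteItem, TransactWriteItemsInput,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);

    let mut item = HashMap::new();
    item.insert(
        "OrderId".to_string(),
        AttributeValue {
            s: Some("order-123".to_string()),
            ..Default::default()
        },
    );

    let input = TransactWriteItemsInput {
        transact_items: vec![TransactWriteItem {
            put: Some(Put {
                item,
                table_name: "Orders".to_string(),
                // Reject the whole transaction if the item already exists.
                condition_expression: Some("attribute_not_exists(OrderId)".to_string()),
                ..Default::default()
            }),
            ..Default::default()
        }],
        ..Default::default()
    };

    match client.transact_write_items(input).sync() {
        Ok(_) => println!("committed"),
        Err(e) => eprintln!("transaction rejected: {}", e),
    }
}
```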

fn transact_write_items( &self, input: TransactWriteItemsInput, @@ -5492,7 +5492,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

+ ///

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to five times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

fn untag_resource(&self, input: UntagResourceInput) -> RusotoFuture<(), UntagResourceError> { let mut request = SignedRequest::new("POST", "dynamodb", &self.region, "/"); @@ -5515,7 +5515,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days..

+ ///

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
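
Enabling point in time recovery through the generated client is a one-struct call; a sketch with a hypothetical table name:

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{
    DynamoDb, DynamoDbClient, PointInTimeRecoverySpecification, UpdateContinuousBackupsInput,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = UpdateContinuousBackupsInput {
        table_name: "Orders".to_string(), // hypothetical table
        point_in_time_recovery_specification: PointInTimeRecoverySpecification {
            point_in_time_recovery_enabled: true,
        },
    };
    match client.update_continuous_backups(input).sync() {
        Ok(output) => println!("{:?}", output.continuous_backups_description),
        Err(e) => eprintln!("update failed: {}", e),
    }
}
```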

fn update_continuous_backups( &self, input: UpdateContinuousBackupsInput, @@ -5541,7 +5541,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, and must have DynamoDB Streams enabled and must have same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

+ ///

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.
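
A sketch of adding one replica, following the one-change-per-request recommendation above (the table name and Region are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{
    CreateReplicaAction, DynamoDb, DynamoDbClient, ReplicaUpdate, UpdateGlobalTableInput,
};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = UpdateGlobalTableInput {
        global_table_name: "Orders".to_string(), // hypothetical global table
        replica_updates: vec![ReplicaUpdate {
            create: Some(CreateReplicaAction {
                region_name: "eu-west-1".to_string(),
            }),
            delete: None,
        }],
    };
    match client.update_global_table(input).sync() {
        Ok(output) => println!("{:?}", output.global_table_description),
        Err(e) => eprintln!("update failed: {}", e),
    }
}
```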

fn update_global_table( &self, input: UpdateGlobalTableInput, @@ -5628,7 +5628,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

+ ///

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable DynamoDB Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
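
A sketch of the first operation in the list, adjusting only the provisioned throughput (the table name and capacity numbers are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, ProvisionedThroughput, UpdateTableInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = UpdateTableInput {
        table_name: "Orders".to_string(), // hypothetical table
        provisioned_throughput: Some(ProvisionedThroughput {
            read_capacity_units: 10,
            write_capacity_units: 5,
        }),
        ..Default::default()
    };
    match client.update_table(input).sync() {
        Ok(output) => println!("{:?}", output.table_description),
        Err(e) => eprintln!("update failed: {}", e),
    }
}
```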

fn update_table( &self, input: UpdateTableInput, @@ -5657,7 +5657,7 @@ impl DynamoDb for DynamoDbClient { }) } - ///

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

+ ///

The UpdateTimeToLive method enables or disables Time to Live (TTL) for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. It can take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one-hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any local secondary index and global secondary index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.
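
A sketch of enabling TTL on a table whose items carry an epoch-seconds expiry attribute (both names are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, TimeToLiveSpecification, UpdateTimeToLiveInput};

fn main() {
    let client = DynamoDbClient::new(Region::UsEast1);
    let input = UpdateTimeToLiveInput {
        table_name: "Orders".to_string(), // hypothetical table
        time_to_live_specification: TimeToLiveSpecification {
            // Hypothetical attribute holding the epoch-seconds expiry.
            attribute_name: "ExpiresAt".to_string(),
            enabled: true,
        },
    };
    match client.update_time_to_live(input).sync() {
        Ok(output) => println!("{:?}", output.time_to_live_specification),
        Err(e) => eprintln!("update failed: {}", e),
    }
}
```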

fn update_time_to_live( &self, input: UpdateTimeToLiveInput, diff --git a/rusoto/services/dynamodb/src/lib.rs b/rusoto/services/dynamodb/src/lib.rs index 4242b61a016..0a0b6c8183f 100644 --- a/rusoto/services/dynamodb/src/lib.rs +++ b/rusoto/services/dynamodb/src/lib.rs @@ -35,21 +35,19 @@ //! let list_tables_input: ListTablesInput = Default::default(); //! //! match client.list_tables(list_tables_input).sync() { -//! Ok(output) => { -//! match output.table_names { -//! Some(table_name_list) => { -//! println!("Tables in database:"); +//! Ok(output) => match output.table_names { +//! Some(table_name_list) => { +//! println!("Tables in database:"); //! -//! for table_name in table_name_list { -//! println!("{}", table_name); -//! } -//! }, -//! None => println!("No tables in database!"), +//! for table_name in table_name_list { +//! println!("{}", table_name); +//! } //! } +//! None => println!("No tables in database!"), //! }, //! Err(error) => { //! println!("Error: {:?}", error); -//! }, +//! } //! } //! } //! ``` diff --git a/rusoto/services/dynamodbstreams/Cargo.toml b/rusoto/services/dynamodbstreams/Cargo.toml index 498a6adfb84..2b4af6a0e7f 100644 --- a/rusoto/services/dynamodbstreams/Cargo.toml +++ b/rusoto/services/dynamodbstreams/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_dynamodbstreams" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/dynamodbstreams/README.md b/rusoto/services/dynamodbstreams/README.md index 337c7ee4136..0e018de77bc 100644 --- a/rusoto/services/dynamodbstreams/README.md +++ b/rusoto/services/dynamodbstreams/README.md @@ -23,9 +23,16 @@ To use `rusoto_dynamodbstreams` in your application, add it as a dependency in y ```toml [dependencies] -rusoto_dynamodbstreams = "0.40.0" +rusoto_dynamodbstreams = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/dynamodbstreams/src/custom/mod.rs b/rusoto/services/dynamodbstreams/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/dynamodbstreams/src/custom/mod.rs +++ b/rusoto/services/dynamodbstreams/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/dynamodbstreams/src/generated.rs b/rusoto/services/dynamodbstreams/src/generated.rs index 39911404aaa..efe193eda2d 100644 --- a/rusoto/services/dynamodbstreams/src/generated.rs +++ b/rusoto/services/dynamodbstreams/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Represents the data for an attribute. You can set one, and only one, of the elements.

Each attribute in an item is a name-value pair. An attribute can be single-valued or a multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttributeValue { ///

A Binary data type.

#[serde(rename = "B")] @@ -98,7 +97,7 @@ pub struct DescribeStreamInput { ///

Represents the output of a DescribeStream operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamOutput { ///

A complete description of the stream, including its creation date and time, the DynamoDB table associated with the stream, the shard IDs within the stream, and the beginning and ending sequence numbers of stream records within the shards.

#[serde(rename = "StreamDescription")] @@ -120,7 +119,7 @@ pub struct GetRecordsInput { ///

Represents the output of a GetRecords operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRecordsOutput { ///

The next position in the shard from which to start sequentially reading stream records. If set to null, the shard has been closed and the requested iterator will not return any more data.
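
That contract (read with the returned iterator, stop when NextShardIterator is absent) drives the typical consumption loop; a sketch, assuming the stream ARN and shard ID were already discovered via ListStreams and DescribeStream (all identifiers are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_dynamodbstreams::{
    DynamoDbStreams, DynamoDbStreamsClient, GetRecordsInput, GetShardIteratorInput,
};

fn main() {
    let client = DynamoDbStreamsClient::new(Region::UsEast1);

    // Hypothetical identifiers, normally obtained from DescribeStream.
    let stream_arn =
        "arn:aws:dynamodb:us-east-1:111122223333:table/Orders/stream/2019-01-01T00:00:00.000";
    let iterator_output = client
        .get_shard_iterator(GetShardIteratorInput {
            stream_arn: stream_arn.to_string(),
            shard_id: "shardId-00000000000000000000-00000000".to_string(),
            shard_iterator_type: "TRIM_HORIZON".to_string(),
            sequence_number: None,
        })
        .sync()
        .expect("get_shard_iterator failed");

    // An absent NextShardIterator means the shard has been closed.
    let mut iterator = iterator_output.shard_iterator;
    while let Some(shard_iterator) = iterator {
        let output = client
            .get_records(GetRecordsInput {
                shard_iterator,
                limit: None,
            })
            .sync()
            .expect("get_records failed");
        for record in output.records.unwrap_or_default() {
            println!("{:?}", record.event_name);
        }
        iterator = output.next_shard_iterator;
    }
}
```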

#[serde(rename = "NextShardIterator")] @@ -152,7 +151,7 @@ pub struct GetShardIteratorInput { ///

Represents the output of a GetShardIterator operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetShardIteratorOutput { ///

The position in the shard from which to start reading stream records sequentially. A shard iterator specifies this position using the sequence number of a stream record in a shard.

#[serde(rename = "ShardIterator")] @@ -162,7 +161,7 @@ pub struct GetShardIteratorOutput { ///

Contains details about the type of identity that made the request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Identity { ///

A unique identifier for the entity that made the call. For Time To Live, the principalId is "dynamodb.amazonaws.com".

#[serde(rename = "PrincipalId")] @@ -176,7 +175,7 @@ pub struct Identity { ///

Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key (partition key) would be represented by one KeySchemaElement. A composite primary key (partition key and sort key) would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.

The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeySchemaElement { ///

The name of a key attribute.

#[serde(rename = "AttributeName")] @@ -205,7 +204,7 @@ pub struct ListStreamsInput { ///

Represents the output of a ListStreams operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamsOutput { ///

The stream ARN of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

If LastEvaluatedStreamArn is empty, then the "last page" of results has been processed and there is no more data to be retrieved.

If LastEvaluatedStreamArn is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedStreamArn is empty.
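
In code, that pagination rule reduces to looping until `last_evaluated_stream_arn` is absent; a sketch against the generated client:

```rust
use rusoto_core::Region;
use rusoto_dynamodbstreams::{DynamoDbStreams, DynamoDbStreamsClient, ListStreamsInput};

fn main() {
    let client = DynamoDbStreamsClient::new(Region::UsEast1);
    let mut start_arn: Option<String> = None;

    // Keep requesting pages until LastEvaluatedStreamArn comes back empty.
    loop {
        let input = ListStreamsInput {
            exclusive_start_stream_arn: start_arn.take(),
            ..Default::default()
        };
        let output = client
            .list_streams(input)
            .sync()
            .expect("list_streams failed");
        for stream in output.streams.unwrap_or_default() {
            println!("{:?}", stream.stream_arn);
        }
        match output.last_evaluated_stream_arn {
            Some(arn) => start_arn = Some(arn),
            None => break,
        }
    }
}
```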

#[serde(rename = "LastEvaluatedStreamArn")] @@ -219,7 +218,7 @@ pub struct ListStreamsOutput { ///

A description of a unique event within a stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Record { ///

The region in which the GetRecords request was received.

#[serde(rename = "awsRegion")] @@ -253,7 +252,7 @@ pub struct Record { ///

The beginning and ending sequence numbers for the stream records contained within a shard.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SequenceNumberRange { ///

The last sequence number.

#[serde(rename = "EndingSequenceNumber")] @@ -267,7 +266,7 @@ pub struct SequenceNumberRange { ///

A uniquely identified group of stream records within a stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Shard { ///

The shard ID of the current shard's parent.

#[serde(rename = "ParentShardId")] @@ -285,7 +284,7 @@ pub struct Shard { ///

Represents all of the data describing a particular stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Stream { ///

The Amazon Resource Name (ARN) for the stream.

#[serde(rename = "StreamArn")] @@ -303,7 +302,7 @@ pub struct Stream { ///

Represents all of the data describing a particular stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamDescription { ///

The date and time when the request to create this stream was issued.

#[serde(rename = "CreationRequestDateTime")] @@ -345,7 +344,7 @@ pub struct StreamDescription { ///

A description of a single data modification that was performed on an item in a DynamoDB table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamRecord { ///

The approximate date and time when the stream record was created, in UNIX epoch time format.

#[serde(rename = "ApproximateCreationDateTime")] @@ -597,10 +596,7 @@ impl DynamoDbStreamsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> DynamoDbStreamsClient { - DynamoDbStreamsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -614,10 +610,14 @@ impl DynamoDbStreamsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - DynamoDbStreamsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> DynamoDbStreamsClient { + DynamoDbStreamsClient { client, region } } } diff --git a/rusoto/services/ec2-instance-connect/Cargo.toml b/rusoto/services/ec2-instance-connect/Cargo.toml new file mode 100644 index 00000000000..600a7d46804 --- /dev/null +++ b/rusoto/services/ec2-instance-connect/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - AWS EC2 Instance Connect @ 2018-04-02" +documentation = "https://docs.rs/rusoto_ec2_instance_connect" +keywords = ["AWS", "Amazon", "ec2-instance-connect"] +license = "MIT" +name = "rusoto_ec2_instance_connect" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ec2-instance-connect/README.md b/rusoto/services/ec2-instance-connect/README.md new file mode 100644 index 00000000000..0c91cfea409 --- /dev/null +++ b/rusoto/services/ec2-instance-connect/README.md @@ -0,0 +1,52 @@ + +# Rusoto Ec2InstanceConnect +Rust SDK for AWS EC2 Instance Connect + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_ec2_instance_connect` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_ec2_instance_connect = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. 
+ +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. + +[api-documentation]: https://docs.rs/rusoto_ec2_instance_connect "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/ec2-instance-connect/src/custom/mod.rs b/rusoto/services/ec2-instance-connect/src/custom/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rusoto/services/ec2-instance-connect/src/generated.rs b/rusoto/services/ec2-instance-connect/src/generated.rs new file mode 100644 index 00000000000..fd16bfa81db --- /dev/null +++ b/rusoto/services/ec2-instance-connect/src/generated.rs @@ -0,0 +1,191 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct SendSSHPublicKeyRequest { + ///

The Availability Zone in which the EC2 instance was launched.

+ #[serde(rename = "AvailabilityZone")] + pub availability_zone: String, + ///

The EC2 instance you wish to publish the SSH key to.

+ #[serde(rename = "InstanceId")] + pub instance_id: String, + ///

The OS user on the EC2 instance whom the key may be used to authenticate as.

+ #[serde(rename = "InstanceOSUser")] + pub instance_os_user: String, + ///

The public key to be published to the instance. To use it after publication, you must have the matching private key.

+ #[serde(rename = "SSHPublicKey")] + pub ssh_public_key: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct SendSSHPublicKeyResponse { + ///

The request ID as logged by EC2 Connect. Please provide this when contacting AWS Support.

+ #[serde(rename = "RequestId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub request_id: Option, + ///

Indicates request success.

+ #[serde(rename = "Success")] + #[serde(skip_serializing_if = "Option::is_none")] + pub success: Option, +} + +/// Errors returned by SendSSHPublicKey +#[derive(Debug, PartialEq)] +pub enum SendSSHPublicKeyError { + ///

Indicates that either your AWS credentials are invalid or you do not have access to the EC2 instance.

+ Auth(String), + ///

Indicates that the instance requested was not found in the given zone. Check that you have provided a valid instance ID and the correct zone.

+ EC2InstanceNotFound(String), + ///

Indicates that you provided bad input. Ensure you have a valid instance ID, the correct zone, and a valid SSH public key.

+ InvalidArgs(String), + ///

Indicates that the service encountered an error. Follow the message's instructions and try again.

+ Service(String), + ///

Indicates you have been making requests too frequently and have been throttled. Wait for a while and try again. If higher call volume is warranted, contact AWS Support.

+ Throttling(String), +} + +impl SendSSHPublicKeyError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "AuthException" => { + return RusotoError::Service(SendSSHPublicKeyError::Auth(err.msg)) + } + "EC2InstanceNotFoundException" => { + return RusotoError::Service(SendSSHPublicKeyError::EC2InstanceNotFound( + err.msg, + )) + } + "InvalidArgsException" => { + return RusotoError::Service(SendSSHPublicKeyError::InvalidArgs(err.msg)) + } + "ServiceException" => { + return RusotoError::Service(SendSSHPublicKeyError::Service(err.msg)) + } + "ThrottlingException" => { + return RusotoError::Service(SendSSHPublicKeyError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for SendSSHPublicKeyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for SendSSHPublicKeyError { + fn description(&self) -> &str { + match *self { + SendSSHPublicKeyError::Auth(ref cause) => cause, + SendSSHPublicKeyError::EC2InstanceNotFound(ref cause) => cause, + SendSSHPublicKeyError::InvalidArgs(ref cause) => cause, + SendSSHPublicKeyError::Service(ref cause) => cause, + SendSSHPublicKeyError::Throttling(ref cause) => cause, + } + } +} +/// Trait representing the capabilities of the EC2 Instance Connect API. EC2 Instance Connect clients implement this trait. +pub trait Ec2InstanceConnect { + ///

Pushes an SSH public key to a particular OS user on a given EC2 instance for 60 seconds.
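
A sketch of calling the new operation end to end; every value below is a placeholder, and the published key is only usable for the 60-second window described above:

```rust
use rusoto_core::Region;
use rusoto_ec2_instance_connect::{
    Ec2InstanceConnect, Ec2InstanceConnectClient, SendSSHPublicKeyRequest,
};

fn main() {
    let client = Ec2InstanceConnectClient::new(Region::UsEast1);
    let request = SendSSHPublicKeyRequest {
        // Placeholder values for the target instance.
        availability_zone: "us-east-1a".to_string(),
        instance_id: "i-0123456789abcdef0".to_string(),
        instance_os_user: "ec2-user".to_string(),
        ssh_public_key: "ssh-ed25519 AAAA... user@host".to_string(),
    };
    match client.send_ssh_public_key(request).sync() {
        Ok(response) => println!("success: {:?}", response.success),
        Err(e) => eprintln!("request failed: {}", e),
    }
}
```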

+ fn send_ssh_public_key( + &self, + input: SendSSHPublicKeyRequest, + ) -> RusotoFuture; +} +/// A client for the EC2 Instance Connect API. +#[derive(Clone)] +pub struct Ec2InstanceConnectClient { + client: Client, + region: region::Region, +} + +impl Ec2InstanceConnectClient { + /// Creates a client backed by the default tokio event loop. + /// + /// The client will use the default credentials provider and tls client. + pub fn new(region: region::Region) -> Ec2InstanceConnectClient { + Self::new_with_client(Client::shared(), region) + } + + pub fn new_with( + request_dispatcher: D, + credentials_provider: P, + region: region::Region, + ) -> Ec2InstanceConnectClient + where + P: ProvideAwsCredentials + Send + Sync + 'static, + P::Future: Send, + D: DispatchSignedRequest + Send + Sync + 'static, + D::Future: Send, + { + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), + region, + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Ec2InstanceConnectClient { + Ec2InstanceConnectClient { client, region } + } +} + +impl Ec2InstanceConnect for Ec2InstanceConnectClient { + ///

Pushes an SSH public key to a particular OS user on a given EC2 instance for 60 seconds.

+ fn send_ssh_public_key( + &self, + input: SendSSHPublicKeyRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "ec2-instance-connect", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWSEC2InstanceConnectService.SendSSHPublicKey", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(SendSSHPublicKeyError::from_response(response))), + ) + } + }) + } +} diff --git a/rusoto/services/ec2-instance-connect/src/lib.rs b/rusoto/services/ec2-instance-connect/src/lib.rs new file mode 100644 index 00000000000..5cad86aac1a --- /dev/null +++ b/rusoto/services/ec2-instance-connect/src/lib.rs @@ -0,0 +1,32 @@ + +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= + +#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] +//!

AWS EC2 Connect Service enables system administrators to publish temporary SSH keys to their EC2 instances, establishing connections to those instances without leaving a permanent authentication option.

+//! +//! If you're using the service, you're probably looking for [Ec2InstanceConnectClient](struct.Ec2InstanceConnectClient.html) and [Ec2InstanceConnect](trait.Ec2InstanceConnect.html). + +extern crate bytes; +extern crate futures; +extern crate rusoto_core; +extern crate serde; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; + +mod generated; +mod custom; + +pub use crate::generated::*; +pub use crate::custom::*; + diff --git a/rusoto/services/ec2/Cargo.toml b/rusoto/services/ec2/Cargo.toml index aa606c50454..246331daaec 100644 --- a/rusoto/services/ec2/Cargo.toml +++ b/rusoto/services/ec2/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ec2" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ec2/README.md b/rusoto/services/ec2/README.md index 91e8c873e3e..c357a3aa62e 100644 --- a/rusoto/services/ec2/README.md +++ b/rusoto/services/ec2/README.md @@ -23,9 +23,16 @@ To use `rusoto_ec2` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_ec2 = "0.40.0" +rusoto_ec2 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ec2/src/custom/mod.rs b/rusoto/services/ec2/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ec2/src/custom/mod.rs +++ b/rusoto/services/ec2/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ec2/src/generated.rs b/rusoto/services/ec2/src/generated.rs index 621d6b70424..c0d5b149b53 100644 --- a/rusoto/services/ec2/src/generated.rs +++ b/rusoto/services/ec2/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -1071,6 +1070,96 @@ impl AssignPrivateIpAddressesRequestSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct AssignPrivateIpAddressesResult { + ///

The private IP addresses assigned to the network interface.

+ pub assigned_private_ip_addresses: Option<Vec<AssignedPrivateIpAddress>>, + ///

The ID of the network interface.

+ pub network_interface_id: Option, +} + +struct AssignPrivateIpAddressesResultDeserializer; +impl AssignPrivateIpAddressesResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, AssignPrivateIpAddressesResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "assignedPrivateIpAddressesSet" => { + obj.assigned_private_ip_addresses + .get_or_insert(vec![]) + .extend(AssignedPrivateIpAddressListDeserializer::deserialize( + "assignedPrivateIpAddressesSet", + stack, + )?); + } + "networkInterfaceId" => { + obj.network_interface_id = Some(StringDeserializer::deserialize( + "networkInterfaceId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +///

Describes the private IP addresses assigned to a network interface.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct AssignedPrivateIpAddress { + ///

The private IP address assigned to the network interface.

+ pub private_ip_address: Option, +} + +struct AssignedPrivateIpAddressDeserializer; +impl AssignedPrivateIpAddressDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, AssignedPrivateIpAddress, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "privateIpAddress" => { + obj.private_ip_address = + Some(StringDeserializer::deserialize("privateIpAddress", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +struct AssignedPrivateIpAddressListDeserializer; +impl AssignedPrivateIpAddressListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(AssignedPrivateIpAddressDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct AssociateAddressRequest { ///

[EC2-VPC] The allocation ID. This is required for EC2-VPC.

@@ -3026,7 +3115,6 @@ impl CancelCapacityReservationResultDeserializer { ) } } -///

Contains the parameters for CancelConversionTask.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CancelConversionRequest { ///

The ID of the conversion task.

@@ -3059,7 +3147,6 @@ impl CancelConversionRequestSerializer { } } -///

Contains the parameters for CancelExportTask.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CancelExportTaskRequest { ///

The ID of the export task. This is the ID returned by CreateInstanceExportTask.

@@ -3082,7 +3169,6 @@ impl CancelExportTaskRequestSerializer { } } -///

Contains the parameters for CancelImportTask.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CancelImportTaskRequest { ///

The reason for canceling the task.

@@ -3114,7 +3200,6 @@ impl CancelImportTaskRequestSerializer { } } -///

Contains the output for CancelImportTask.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CancelImportTaskResult { ///

The ID of the task being canceled.

@@ -3587,8 +3672,12 @@ impl CancelledSpotInstanceRequestListDeserializer { pub struct CapacityReservation { ///

The Availability Zone in which the capacity is reserved.

pub availability_zone: Option, + ///

The Availability Zone ID of the Capacity Reservation.

+ pub availability_zone_id: Option<String>, ///

The remaining capacity. Indicates the number of instances that can be launched in the Capacity Reservation.

pub available_instance_count: Option, + ///

The Amazon Resource Name (ARN) of the Capacity Reservation.

+ pub capacity_reservation_arn: Option<String>, ///

The ID of the Capacity Reservation.

pub capacity_reservation_id: Option, ///

The date and time at which the Capacity Reservation was created.

@@ -3607,13 +3696,15 @@ pub struct CapacityReservation { pub instance_platform: Option, ///

The type of instance for which the Capacity Reservation reserves capacity.

pub instance_type: Option, - ///

The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

  • active - The Capacity Reservation is active and the capacity is available for your use.

  • cancelled - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.

  • expired - The Capacity Reservation was manually cancelled. The reserved capacity is no longer available for your use.

  • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

  • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

+ ///

The ID of the AWS account that owns the Capacity Reservation.

+ pub owner_id: Option<String>, + ///

The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

  • active - The Capacity Reservation is active and the capacity is available for your use.

  • expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.

  • cancelled - The Capacity Reservation was manually cancelled. The reserved capacity is no longer available for your use.

  • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

  • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

pub state: Option, ///

Any tags assigned to the Capacity Reservation.

pub tags: Option>, ///

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

  • default - The Capacity Reservation is created on hardware that is shared with other AWS accounts.

  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single AWS account.

pub tenancy: Option, - ///

The number of instances for which the Capacity Reservation reserves capacity.

+ ///

The total number of instances for which the Capacity Reservation reserves capacity.

pub total_instance_count: Option, } @@ -3630,12 +3721,24 @@ impl CapacityReservationDeserializer { obj.availability_zone = Some(StringDeserializer::deserialize("availabilityZone", stack)?); } + "availabilityZoneId" => { + obj.availability_zone_id = Some(StringDeserializer::deserialize( + "availabilityZoneId", + stack, + )?); + } "availableInstanceCount" => { obj.available_instance_count = Some(IntegerDeserializer::deserialize( "availableInstanceCount", stack, )?); } + "capacityReservationArn" => { + obj.capacity_reservation_arn = Some(StringDeserializer::deserialize( + "capacityReservationArn", + stack, + )?); + } "capacityReservationId" => { obj.capacity_reservation_id = Some(StringDeserializer::deserialize( "capacityReservationId", @@ -3679,6 +3782,9 @@ impl CapacityReservationDeserializer { obj.instance_type = Some(StringDeserializer::deserialize("instanceType", stack)?); } + "ownerId" => { + obj.owner_id = Some(StringDeserializer::deserialize("ownerId", stack)?); + } "state" => { obj.state = Some(CapacityReservationStateDeserializer::deserialize( "state", stack, @@ -4721,7 +4827,7 @@ pub struct ClientVpnEndpoint { pub dns_servers: Option>, ///

The ARN of the server certificate.

pub server_certificate_arn: Option, - ///

Indicates whether VPN split tunneling is supported.

+ ///

Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint.

For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client VPN Endpoint in the AWS Client VPN Administrator Guide.

pub split_tunnel: Option, ///

The current state of the Client VPN endpoint.

pub status: Option, @@ -5533,9 +5639,9 @@ pub struct CopySnapshotRequest { pub destination_region: Option, ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, - ///

Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot use it to create an unencrypted copy of an encrypted snapshot. Your default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

+ ///

To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Otherwise, omit this parameter. Encrypted snapshots are encrypted, even if you omit this parameter and encryption by default is not enabled. You cannot set this parameter to false. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

pub encrypted: Option, - ///

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use to encrypt the volume. This parameter is only required if you want to use a customer-managed CMK; if this parameter is not specified, your AWS-managed CMK for the account is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID: For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias: For example, alias/ExampleAlias.

  • Key ARN: The key ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN: The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. The action will eventually fail.

+ ///

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
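
A sketch of an encrypted cross-Region copy using one of the identifier forms listed above (the snapshot ID and alias are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CopySnapshotRequest, Ec2, Ec2Client};

fn main() {
    // The request is issued against the destination Region.
    let client = Ec2Client::new(Region::UsEast1);
    let request = CopySnapshotRequest {
        source_region: "us-west-2".to_string(),
        source_snapshot_id: "snap-0123456789abcdef0".to_string(), // hypothetical
        encrypted: Some(true),
        // Any of the four CMK identifier forms above works here.
        kms_key_id: Some("alias/ExampleAlias".to_string()),
        ..Default::default()
    };
    match client.copy_snapshot(request).sync() {
        Ok(result) => println!("new snapshot: {:?}", result.snapshot_id),
        Err(e) => eprintln!("copy failed: {}", e),
    }
}
```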

pub kms_key_id: Option<String>, ///

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

pub presigned_url: Option, @@ -5641,7 +5747,7 @@ impl CpuOptionsDeserializer { pub struct CpuOptionsRequest { ///

The number of CPU cores for the instance.
pub core_count: Option<i64>,
- /// The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.
+ /// The number of threads per CPU core. To disable multithreading for the instance, specify a value of 1. Otherwise, specify the default value of 2.
pub threads_per_core: Option<i64>,
}
@@ -5666,7 +5772,9 @@ impl CpuOptionsRequestSerializer {
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CreateCapacityReservationRequest {
/// The Availability Zone in which to create the Capacity Reservation.
- pub availability_zone: String,
+ pub availability_zone: Option<String>,
+ /// The ID of the Availability Zone in which to create the Capacity Reservation.
+ pub availability_zone_id: Option<String>,
/// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency. Constraint: Maximum 64 ASCII characters.
pub client_token: Option<String>,
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

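With AvailabilityZone now optional and AvailabilityZoneId added, callers can identify the zone either way. A minimal sketch, assuming the surrounding rusoto_ec2 types are in scope; the values are hypothetical and the remaining required fields come from the derived Default:

let request = CreateCapacityReservationRequest {
    // Specify the zone by ID instead of by name; "use1-az1" is a made-up example.
    availability_zone_id: Some("use1-az1".to_owned()),
    // Optional idempotency token, up to 64 ASCII characters.
    client_token: Some("capres-0001".to_owned()),
    ..Default::default()
};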
@@ -5702,10 +5810,12 @@ impl CreateCapacityReservationRequestSerializer { prefix.push_str("."); } - params.put( - &format!("{}{}", prefix, "AvailabilityZone"), - &obj.availability_zone, - ); + if let Some(ref field_value) = obj.availability_zone { + params.put(&format!("{}{}", prefix, "AvailabilityZone"), &field_value); + } + if let Some(ref field_value) = obj.availability_zone_id { + params.put(&format!("{}{}", prefix, "AvailabilityZoneId"), &field_value); + } if let Some(ref field_value) = obj.client_token { params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); } @@ -5796,12 +5906,14 @@ pub struct CreateClientVpnEndpointRequest { pub connection_log_options: ConnectionLogOptions, ///

A brief description of the Client VPN endpoint.
pub description: Option<String>,
- /// Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address of the VPC that is to be associated with Client VPN endpoint is used as the DNS server.
+ /// Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address configured on the device is used for the DNS server.
pub dns_servers: Option<Vec<String>>,
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option<bool>,
/// The ARN of the server certificate. For more information, see the AWS Certificate Manager User Guide.
pub server_certificate_arn: String,
+ /// Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. By default, split-tunnel on a VPN endpoint is disabled. For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client VPN Endpoint in the AWS Client VPN Administrator Guide.
+ pub split_tunnel: Option<bool>,
/// The tags to apply to the Client VPN endpoint during creation.
pub tag_specifications: Option<Vec<TagSpecification>>,
/// The transport protocol to be used by the VPN session. Default value: udp

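A sketch of opting in to the new split-tunnel flag (disabled by default); the certificate ARN is a placeholder and the other required fields are filled via the derived Default:

let request = CreateClientVpnEndpointRequest {
    server_certificate_arn: "arn:aws:acm:us-east-1:123456789012:certificate/example".to_owned(),
    // New in this change: enable split-tunnel routing on the endpoint.
    split_tunnel: Some(true),
    ..Default::default()
};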
@@ -5851,6 +5963,9 @@ impl CreateClientVpnEndpointRequestSerializer { &format!("{}{}", prefix, "ServerCertificateArn"), &obj.server_certificate_arn, ); + if let Some(ref field_value) = obj.split_tunnel { + params.put(&format!("{}{}", prefix, "SplitTunnel"), &field_value); + } if let Some(ref field_value) = obj.tag_specifications { TagSpecificationListSerializer::serialize( params, @@ -5991,10 +6106,12 @@ impl CreateClientVpnRouteResultDeserializer { pub struct CreateCustomerGatewayRequest { ///

For devices that support BGP, the customer gateway's BGP ASN. Default: 65000
pub bgp_asn: i64,
+ /// The Amazon Resource Name (ARN) for the customer gateway certificate.
+ pub certificate_arn: Option<String>,
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option<bool>,
/// The Internet-routable IP address for the customer gateway's outside interface. The address must be static.
- pub public_ip: String,
+ pub public_ip: Option<String>,
/// The type of VPN connection that this customer gateway supports (ipsec.1).

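Since PublicIp (serialized as IpAddress) is now optional and CertificateArn is new, a certificate-based gateway can be created without a static address. A sketch with placeholder values:

let request = CreateCustomerGatewayRequest {
    bgp_asn: 65000,
    // New optional field; the ARN below is a placeholder.
    certificate_arn: Some("arn:aws:acm:us-east-1:123456789012:certificate/example".to_owned()),
    // public_ip may now be omitted entirely.
    public_ip: None,
    type_: "ipsec.1".to_owned(),
    ..Default::default()
};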
pub type_: String, } @@ -6009,10 +6126,15 @@ impl CreateCustomerGatewayRequestSerializer { } params.put(&format!("{}{}", prefix, "BgpAsn"), &obj.bgp_asn); + if let Some(ref field_value) = obj.certificate_arn { + params.put(&format!("{}{}", prefix, "CertificateArn"), &field_value); + } if let Some(ref field_value) = obj.dry_run { params.put(&format!("{}{}", prefix, "DryRun"), &field_value); } - params.put(&format!("{}{}", prefix, "IpAddress"), &obj.public_ip); + if let Some(ref field_value) = obj.public_ip { + params.put(&format!("{}{}", prefix, "IpAddress"), &field_value); + } params.put(&format!("{}{}", prefix, "Type"), &obj.type_); } } @@ -6423,7 +6545,7 @@ pub struct CreateFleetRequest { pub excess_capacity_termination_policy: Option, ///

The configuration for the EC2 Fleet.
pub launch_template_configs: Vec<FleetLaunchTemplateConfigRequest>,
- /// The allocation strategy of On-Demand Instances in an EC2 Fleet.
+ /// Describes the configuration of On-Demand Instances in an EC2 Fleet.
pub on_demand_options: Option<OnDemandOptionsRequest>,
/// Indicates whether EC2 Fleet should replace unhealthy instances.
pub replace_unhealthy_instances: Option<bool>,
@@ -6431,7 +6553,7 @@ pub struct CreateFleetRequest {
pub spot_options: Option<SpotOptionsRequest>,
/// The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.
pub tag_specifications: Option<Vec<TagSpecification>>,
- /// The TotalTargetCapacity, OnDemandTargetCapacity, SpotTargetCapacity, and DefaultCapacityType structure.
+ /// The number of units to request.
pub target_capacity_specification: TargetCapacitySpecificationRequest,
/// Indicates whether running instances should be terminated when the EC2 Fleet expires.

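TargetCapacitySpecificationRequest carries the fleet sizing. A sketch requesting 10 units split between On-Demand and Spot; the snake_case field names are assumed to mirror the API's TotalTargetCapacity/OnDemandTargetCapacity/SpotTargetCapacity members:

let request = CreateFleetRequest {
    launch_template_configs: vec![FleetLaunchTemplateConfigRequest::default()],
    target_capacity_specification: TargetCapacitySpecificationRequest {
        total_target_capacity: 10,
        on_demand_target_capacity: Some(2),
        spot_target_capacity: Some(8),
        ..Default::default()
    },
    ..Default::default()
};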
pub terminate_instances_with_expiration: Option, @@ -6573,6 +6695,8 @@ pub struct CreateFlowLogsRequest { pub log_destination: Option, ///

Specifies the type of destination to which the flow log data is to be published. Flow log data can be published to CloudWatch Logs or Amazon S3. To publish flow log data to CloudWatch Logs, specify cloud-watch-logs. To publish flow log data to Amazon S3, specify s3. If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn or LogGroupName. Default: cloud-watch-logs
pub log_destination_type: Option<String>,
+ /// The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field. Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value. Only applicable to flow logs that are published to an Amazon S3 bucket.
+ pub log_format: Option<String>,
/// The name of a new or existing CloudWatch Logs log group where Amazon EC2 publishes your flow logs. If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn or LogGroupName.
pub log_group_name: Option<String>,
/// The ID of the subnet, network interface, or VPC for which you want to create a flow log. Constraints: Maximum of 1000 resources

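The new LogFormat parameter takes space-separated ${field-id} tokens, as the doc comment above describes. A sketch publishing to S3 with a custom record format; the IDs and the chosen fields are illustrative only:

let request = CreateFlowLogsRequest {
    resource_ids: vec!["vpc-0abc1234".to_owned()],
    resource_type: "VPC".to_owned(),
    traffic_type: "ALL".to_owned(),
    log_destination_type: Some("s3".to_owned()),
    // Custom record format; this is a hypothetical subset of the available fields.
    log_format: Some("${version} ${srcaddr} ${dstaddr} ${action}".to_owned()),
    ..Default::default()
};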
@@ -6610,6 +6734,9 @@ impl CreateFlowLogsRequestSerializer { if let Some(ref field_value) = obj.log_destination_type { params.put(&format!("{}{}", prefix, "LogDestinationType"), &field_value); } + if let Some(ref field_value) = obj.log_format { + params.put(&format!("{}{}", prefix, "LogFormat"), &field_value); + } if let Some(ref field_value) = obj.log_group_name { params.put(&format!("{}{}", prefix, "LogGroupName"), &field_value); } @@ -6814,7 +6941,6 @@ impl CreateImageResultDeserializer { }) } } -///

Contains the parameters for CreateInstanceExportTask.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CreateInstanceExportTaskRequest {
/// A description for the conversion task or the resource being exported. The maximum length is 255 bytes.
@@ -6853,7 +6979,6 @@ impl CreateInstanceExportTaskRequestSerializer {
}
}
-/// Contains the output for CreateInstanceExportTask.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CreateInstanceExportTaskResult {
/// Information about the instance export task.
@@ -6969,6 +7094,8 @@ pub struct CreateLaunchTemplateRequest {
pub launch_template_data: RequestLaunchTemplateData,
/// A name for the launch template.
pub launch_template_name: String,
+ /// The tags to apply to the launch template during creation.
+ pub tag_specifications: Option<Vec<TagSpecification>>,
/// A description for the first version of the launch template.

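A sketch of the new tagging-on-creation support, assuming the usual TagSpecification/Tag shapes; the "launch-template" resource type string is an assumption worth checking against the model:

let request = CreateLaunchTemplateRequest {
    launch_template_name: "web-template".to_owned(),
    launch_template_data: RequestLaunchTemplateData::default(),
    tag_specifications: Some(vec![TagSpecification {
        // Tag the template itself, not the instances it launches.
        resource_type: Some("launch-template".to_owned()),
        tags: Some(vec![Tag {
            key: Some("Owner".to_owned()),
            value: Some("TeamA".to_owned()),
        }]),
    }]),
    ..Default::default()
};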
pub version_description: Option, } @@ -6997,6 +7124,13 @@ impl CreateLaunchTemplateRequestSerializer { &format!("{}{}", prefix, "LaunchTemplateName"), &obj.launch_template_name, ); + if let Some(ref field_value) = obj.tag_specifications { + TagSpecificationListSerializer::serialize( + params, + &format!("{}{}", prefix, "TagSpecification"), + field_value, + ); + } if let Some(ref field_value) = obj.version_description { params.put(&format!("{}{}", prefix, "VersionDescription"), &field_value); } @@ -7046,7 +7180,7 @@ pub struct CreateLaunchTemplateVersionRequest { pub launch_template_id: Option, ///

The name of the launch template. You must specify either the launch template ID or launch template name in the request.
pub launch_template_name: Option<String>,
- /// The version number of the launch template version on which to base the new version. The new version inherits the same launch parameters as the source version, except for parameters that you specify in LaunchTemplateData.
+ /// The version number of the launch template version on which to base the new version. The new version inherits the same launch parameters as the source version, except for parameters that you specify in LaunchTemplateData. Snapshots applied to the block device mapping are ignored when creating a new version unless they are explicitly included.
pub source_version: Option<String>,
/// A description for the version of the launch template.
pub version_description: Option<String>,
@@ -7375,7 +7509,7 @@ pub struct CreateNetworkInterfaceRequest {
pub dry_run: Option<bool>,
/// The IDs of one or more security groups.
pub groups: Option<Vec<String>>,
- /// Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. If you are not creating an EFA, specify interface or omit this parameter.
+ /// Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide.
pub interface_type: Option<String>,
/// The number of IPv6 addresses to assign to a network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. You can't use this option if specifying specific IPv6 addresses. If your subnet has the AssignIpv6AddressOnCreation attribute set to true, you can specify 0 to override this setting.
pub ipv_6_address_count: Option<i64>,
@@ -7848,7 +7982,7 @@ impl CreateSnapshotRequestSerializer {
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CreateSnapshotsRequest {
- /// Copies the tags from the specified instance to all snapshots.
+ /// Copies the tags from the specified volume to the corresponding snapshot.
pub copy_tags_from_source: Option<String>,
/// A description propagated to every snapshot specified by the instance.

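CopyTagsFromSource is a string-typed flag; a sketch follows, where "volume" is assumed to be the only defined value rather than confirmed by this diff:

let request = CreateSnapshotsRequest {
    description: Some("nightly backup".to_owned()),
    // Copy each source volume's tags onto its corresponding snapshot.
    copy_tags_from_source: Some("volume".to_owned()),
    ..Default::default()
};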
pub description: Option, @@ -8079,6 +8213,409 @@ impl CreateTagsRequestSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorFilterRequest { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// The description of the Traffic Mirror filter.
+ pub description: Option<String>,
+ /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The tags to assign to a Traffic Mirror filter.

+ pub tag_specifications: Option>, +} + +/// Serialize `CreateTrafficMirrorFilterRequest` contents to a `SignedRequest`. +struct CreateTrafficMirrorFilterRequestSerializer; +impl CreateTrafficMirrorFilterRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &CreateTrafficMirrorFilterRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.client_token { + params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); + } + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.tag_specifications { + TagSpecificationListSerializer::serialize( + params, + &format!("{}{}", prefix, "TagSpecification"), + field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorFilterResult { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// Information about the Traffic Mirror filter.

+ pub traffic_mirror_filter: Option, +} + +struct CreateTrafficMirrorFilterResultDeserializer; +impl CreateTrafficMirrorFilterResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, CreateTrafficMirrorFilterResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "clientToken" => { + obj.client_token = + Some(StringDeserializer::deserialize("clientToken", stack)?); + } + "trafficMirrorFilter" => { + obj.traffic_mirror_filter = + Some(TrafficMirrorFilterDeserializer::deserialize( + "trafficMirrorFilter", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorFilterRuleRequest { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// The description of the Traffic Mirror rule.
+ pub description: Option<String>,
+ /// The destination CIDR block to assign to the Traffic Mirror rule.
+ pub destination_cidr_block: String,
+ /// The destination port range.
+ pub destination_port_range: Option<TrafficMirrorPortRangeRequest>,
+ /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The protocol, for example UDP, to assign to the Traffic Mirror rule. For information about the protocol value, see Protocol Numbers on the Internet Assigned Numbers Authority (IANA) website.
+ pub protocol: Option<i64>,
+ /// The action to take (accept | reject) on the filtered traffic.
+ pub rule_action: String,
+ /// The number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number.
+ pub rule_number: i64,
+ /// The source CIDR block to assign to the Traffic Mirror rule.
+ pub source_cidr_block: String,
+ /// The source port range.
+ pub source_port_range: Option<TrafficMirrorPortRangeRequest>,
+ /// The type of traffic (ingress | egress).
+ pub traffic_direction: String,
+ /// The ID of the filter that this rule is associated with.

+ pub traffic_mirror_filter_id: String, +} + +/// Serialize `CreateTrafficMirrorFilterRuleRequest` contents to a `SignedRequest`. +struct CreateTrafficMirrorFilterRuleRequestSerializer; +impl CreateTrafficMirrorFilterRuleRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &CreateTrafficMirrorFilterRuleRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.client_token { + params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); + } + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + params.put( + &format!("{}{}", prefix, "DestinationCidrBlock"), + &obj.destination_cidr_block, + ); + if let Some(ref field_value) = obj.destination_port_range { + TrafficMirrorPortRangeRequestSerializer::serialize( + params, + &format!("{}{}", prefix, "DestinationPortRange"), + field_value, + ); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.protocol { + params.put(&format!("{}{}", prefix, "Protocol"), &field_value); + } + params.put(&format!("{}{}", prefix, "RuleAction"), &obj.rule_action); + params.put(&format!("{}{}", prefix, "RuleNumber"), &obj.rule_number); + params.put( + &format!("{}{}", prefix, "SourceCidrBlock"), + &obj.source_cidr_block, + ); + if let Some(ref field_value) = obj.source_port_range { + TrafficMirrorPortRangeRequestSerializer::serialize( + params, + &format!("{}{}", prefix, "SourcePortRange"), + field_value, + ); + } + params.put( + &format!("{}{}", prefix, "TrafficDirection"), + &obj.traffic_direction, + ); + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterId"), + &obj.traffic_mirror_filter_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorFilterRuleResult { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// The Traffic Mirror rule.

+ pub traffic_mirror_filter_rule: Option, +} + +struct CreateTrafficMirrorFilterRuleResultDeserializer; +impl CreateTrafficMirrorFilterRuleResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, CreateTrafficMirrorFilterRuleResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "clientToken" => { + obj.client_token = + Some(StringDeserializer::deserialize("clientToken", stack)?); + } + "trafficMirrorFilterRule" => { + obj.traffic_mirror_filter_rule = + Some(TrafficMirrorFilterRuleDeserializer::deserialize( + "trafficMirrorFilterRule", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorSessionRequest { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// The description of the Traffic Mirror session.
+ pub description: Option<String>,
+ /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The ID of the source network interface.
+ pub network_interface_id: String,
+ /// The number of bytes in each packet to mirror. These are bytes after the VXLAN header. Do not specify this parameter when you want to mirror the entire packet. To mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. If you do not want to mirror the entire packet, use the PacketLength parameter to specify the number of bytes in each packet to mirror.
+ pub packet_length: Option<i64>,
+ /// The session number determines the order in which sessions are evaluated when an interface is used by multiple sessions. The first session with a matching filter is the one that mirrors the packets. Valid values are 1-32766.
+ pub session_number: i64,
+ /// The tags to assign to a Traffic Mirror session.
+ pub tag_specifications: Option<Vec<TagSpecification>>,
+ /// The ID of the Traffic Mirror filter.
+ pub traffic_mirror_filter_id: String,
+ /// The ID of the Traffic Mirror target.
+ pub traffic_mirror_target_id: String,
+ /// The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique id is chosen at random.

+ pub virtual_network_id: Option, +} + +/// Serialize `CreateTrafficMirrorSessionRequest` contents to a `SignedRequest`. +struct CreateTrafficMirrorSessionRequestSerializer; +impl CreateTrafficMirrorSessionRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &CreateTrafficMirrorSessionRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.client_token { + params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); + } + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "NetworkInterfaceId"), + &obj.network_interface_id, + ); + if let Some(ref field_value) = obj.packet_length { + params.put(&format!("{}{}", prefix, "PacketLength"), &field_value); + } + params.put( + &format!("{}{}", prefix, "SessionNumber"), + &obj.session_number, + ); + if let Some(ref field_value) = obj.tag_specifications { + TagSpecificationListSerializer::serialize( + params, + &format!("{}{}", prefix, "TagSpecification"), + field_value, + ); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterId"), + &obj.traffic_mirror_filter_id, + ); + params.put( + &format!("{}{}", prefix, "TrafficMirrorTargetId"), + &obj.traffic_mirror_target_id, + ); + if let Some(ref field_value) = obj.virtual_network_id { + params.put(&format!("{}{}", prefix, "VirtualNetworkId"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorSessionResult { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// Information about the Traffic Mirror session.

+ pub traffic_mirror_session: Option, +} + +struct CreateTrafficMirrorSessionResultDeserializer; +impl CreateTrafficMirrorSessionResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, CreateTrafficMirrorSessionResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "clientToken" => { + obj.client_token = + Some(StringDeserializer::deserialize("clientToken", stack)?); + } + "trafficMirrorSession" => { + obj.traffic_mirror_session = + Some(TrafficMirrorSessionDeserializer::deserialize( + "trafficMirrorSession", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorTargetRequest { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// The description of the Traffic Mirror target.
+ pub description: Option<String>,
+ /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The network interface ID that is associated with the target.
+ pub network_interface_id: Option<String>,
+ /// The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
+ pub network_load_balancer_arn: Option<String>,
+ /// The tags to assign to the Traffic Mirror target.

+ pub tag_specifications: Option>, +} + +/// Serialize `CreateTrafficMirrorTargetRequest` contents to a `SignedRequest`. +struct CreateTrafficMirrorTargetRequestSerializer; +impl CreateTrafficMirrorTargetRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &CreateTrafficMirrorTargetRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.client_token { + params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); + } + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.network_interface_id { + params.put(&format!("{}{}", prefix, "NetworkInterfaceId"), &field_value); + } + if let Some(ref field_value) = obj.network_load_balancer_arn { + params.put( + &format!("{}{}", prefix, "NetworkLoadBalancerArn"), + &field_value, + ); + } + if let Some(ref field_value) = obj.tag_specifications { + TagSpecificationListSerializer::serialize( + params, + &format!("{}{}", prefix, "TagSpecification"), + field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct CreateTrafficMirrorTargetResult { + ///

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
+ pub client_token: Option<String>,
+ /// Information about the Traffic Mirror target.

+ pub traffic_mirror_target: Option, +} + +struct CreateTrafficMirrorTargetResultDeserializer; +impl CreateTrafficMirrorTargetResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, CreateTrafficMirrorTargetResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "clientToken" => { + obj.client_token = + Some(StringDeserializer::deserialize("clientToken", stack)?); + } + "trafficMirrorTarget" => { + obj.traffic_mirror_target = + Some(TrafficMirrorTargetDeserializer::deserialize( + "trafficMirrorTarget", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateTransitGatewayRequest { ///

A description of the transit gateway.
@@ -8538,11 +9075,11 @@ pub struct CreateVolumeRequest {
pub availability_zone: String,
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option<bool>,
- /// Specifies the encryption state of the volume. The default effect of setting the Encrypted parameter to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether account-level encryption is enabled. Each default case can be overridden by specifying a customer master key (CMK) using the KmsKeyId parameter, in addition to setting Encrypted to true. For a complete list of possible encryption cases, see Amazon EBS Encryption. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.
+ /// Specifies whether the volume should be encrypted. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Encryption by Default in the Amazon Elastic Compute Cloud User Guide. Encrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.
pub encrypted: Option<bool>,
/// The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide. This parameter is valid only for Provisioned IOPS SSD (io1) volumes.
pub iops: Option<i64>,
- /// An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use to encrypt the volume. This parameter is only required if you want to use a customer-managed CMK; if this parameter is not specified, your AWS-managed CMK for the account is used. If a KmsKeyId is specified, the Encrypted flag must also be set. The CMK identifier may be provided in any of the following formats:
- ///   • Key ID: For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.
- ///   • Key alias: For example, alias/ExampleAlias.
- ///   • Key ARN: The key ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
- ///   • Alias ARN: The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
- /// AWS authenticates KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. The action will eventually fail.
+ /// The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the CMK using any of the following:
+ ///   • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.
+ ///   • Key alias. For example, alias/ExampleAlias.
+ ///   • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
+ ///   • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
+ /// AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
pub kms_key_id: Option<String>,
/// The size of the volume, in GiBs. Constraints: 1-16,384 for gp2, 4-16,384 for io1, 500-16,384 for st1, 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size. Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. At least one of Size or SnapshotId is required.
pub size: Option<i64>,
@@ -8550,7 +9087,7 @@ pub struct CreateVolumeRequest {
pub snapshot_id: Option<String>,
/// The tags to apply to the volume during creation.
pub tag_specifications: Option<Vec<TagSpecification>>,
- /// The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Defaults: If no volume type is specified, the default is standard in us-east-1, eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, and cn-north-1. In all other Regions, EBS defaults to gp2.
+ /// The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Default: gp2
pub volume_type: Option<String>,
}
@@ -9025,7 +9562,7 @@ pub struct CreateVpnConnectionRequest {
pub options: Option<VpnConnectionOptionsSpecification>,
/// The ID of the transit gateway. If you specify a transit gateway, you cannot specify a virtual private gateway.
pub transit_gateway_id: Option<String>,
- /// The type of VPN connection (ipsec.1 | ipsec.2).
+ /// The type of VPN connection (ipsec.1).
pub type_: String,
/// The ID of the virtual private gateway. If you specify a virtual private gateway, you cannot specify a transit gateway.

pub vpn_gateway_id: Option, @@ -9247,6 +9784,8 @@ impl CurrencyCodeValuesDeserializer { pub struct CustomerGateway { ///

The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
pub bgp_asn: Option<String>,
+ /// The Amazon Resource Name (ARN) for the customer gateway certificate.
+ pub certificate_arn: Option<String>,
/// The ID of the customer gateway.
pub customer_gateway_id: Option<String>,
/// The Internet-routable IP address of the customer gateway's outside interface.

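On the read side, the deserializer below fills certificate_arn whenever the response carries a certificateArn element; a trivial consumer sketch (the interpretation is illustrative, not part of this diff):

// A present ARN indicates a certificate-based customer gateway.
fn gateway_uses_certificate(gateway: &CustomerGateway) -> bool {
    gateway.certificate_arn.is_some()
}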
@@ -9271,6 +9810,10 @@ impl CustomerGatewayDeserializer { "bgpAsn" => { obj.bgp_asn = Some(StringDeserializer::deserialize("bgpAsn", stack)?); } + "certificateArn" => { + obj.certificate_arn = + Some(StringDeserializer::deserialize("certificateArn", stack)?); + } "customerGatewayId" => { obj.customer_gateway_id = Some(StringDeserializer::deserialize("customerGatewayId", stack)?); @@ -10698,6 +11241,238 @@ impl DeleteTagsRequestSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorFilterRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The ID of the Traffic Mirror filter.

+ pub traffic_mirror_filter_id: String, +} + +/// Serialize `DeleteTrafficMirrorFilterRequest` contents to a `SignedRequest`. +struct DeleteTrafficMirrorFilterRequestSerializer; +impl DeleteTrafficMirrorFilterRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DeleteTrafficMirrorFilterRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterId"), + &obj.traffic_mirror_filter_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorFilterResult { + ///

The ID of the Traffic Mirror filter.

+ pub traffic_mirror_filter_id: Option, +} + +struct DeleteTrafficMirrorFilterResultDeserializer; +impl DeleteTrafficMirrorFilterResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DeleteTrafficMirrorFilterResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorFilterId" => { + obj.traffic_mirror_filter_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorFilterRuleRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The ID of the Traffic Mirror rule.

+ pub traffic_mirror_filter_rule_id: String, +} + +/// Serialize `DeleteTrafficMirrorFilterRuleRequest` contents to a `SignedRequest`. +struct DeleteTrafficMirrorFilterRuleRequestSerializer; +impl DeleteTrafficMirrorFilterRuleRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DeleteTrafficMirrorFilterRuleRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterRuleId"), + &obj.traffic_mirror_filter_rule_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorFilterRuleResult { + ///

The ID of the deleted Traffic Mirror rule.

+ pub traffic_mirror_filter_rule_id: Option, +} + +struct DeleteTrafficMirrorFilterRuleResultDeserializer; +impl DeleteTrafficMirrorFilterRuleResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DeleteTrafficMirrorFilterRuleResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorFilterRuleId" => { + obj.traffic_mirror_filter_rule_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterRuleId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorSessionRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The ID of the Traffic Mirror session.

+ pub traffic_mirror_session_id: String, +} + +/// Serialize `DeleteTrafficMirrorSessionRequest` contents to a `SignedRequest`. +struct DeleteTrafficMirrorSessionRequestSerializer; +impl DeleteTrafficMirrorSessionRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DeleteTrafficMirrorSessionRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorSessionId"), + &obj.traffic_mirror_session_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorSessionResult { + ///

The ID of the deleted Traffic Mirror session.

+ pub traffic_mirror_session_id: Option, +} + +struct DeleteTrafficMirrorSessionResultDeserializer; +impl DeleteTrafficMirrorSessionResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DeleteTrafficMirrorSessionResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorSessionId" => { + obj.traffic_mirror_session_id = Some(StringDeserializer::deserialize( + "trafficMirrorSessionId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorTargetRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The ID of the Traffic Mirror target.

+ pub traffic_mirror_target_id: String, +} + +/// Serialize `DeleteTrafficMirrorTargetRequest` contents to a `SignedRequest`. +struct DeleteTrafficMirrorTargetRequestSerializer; +impl DeleteTrafficMirrorTargetRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DeleteTrafficMirrorTargetRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorTargetId"), + &obj.traffic_mirror_target_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DeleteTrafficMirrorTargetResult { + ///

The ID of the deleted Traffic Mirror target.

+ pub traffic_mirror_target_id: Option, +} + +struct DeleteTrafficMirrorTargetResultDeserializer; +impl DeleteTrafficMirrorTargetResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DeleteTrafficMirrorTargetResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorTargetId" => { + obj.traffic_mirror_target_id = Some(StringDeserializer::deserialize( + "trafficMirrorTargetId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteTransitGatewayRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

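Pulling the new shapes together, a create-then-delete round trip over the Traffic Mirror types above might look like this sketch; the IDs are placeholders, and the client methods and the session's traffic_mirror_session_id field are assumed to follow rusoto's usual generated naming:

let created = client
    .create_traffic_mirror_session(CreateTrafficMirrorSessionRequest {
        network_interface_id: "eni-0123456789abcdef0".to_owned(),
        session_number: 1, // sessions are evaluated in ascending order; valid values 1-32766
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_owned(),
        traffic_mirror_target_id: "tmt-0123456789abcdef0".to_owned(),
        ..Default::default()
    })
    .sync()
    .expect("create_traffic_mirror_session failed");

if let Some(session) = created.traffic_mirror_session {
    client
        .delete_traffic_mirror_session(DeleteTrafficMirrorSessionRequest {
            traffic_mirror_session_id: session.traffic_mirror_session_id.unwrap_or_default(),
            ..Default::default()
        })
        .sync()
        .expect("delete_traffic_mirror_session failed");
}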
@@ -12428,7 +13203,6 @@ impl DescribeConversionTaskListDeserializer { }) } } -///

Contains the parameters for DescribeConversionTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeConversionTasksRequest {
/// The conversion task IDs.

@@ -12459,7 +13233,6 @@ impl DescribeConversionTasksRequestSerializer { } } -///

Contains the output for DescribeConversionTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeConversionTasksResult {
/// Information about the conversion tasks.

@@ -12819,7 +13592,94 @@ impl DescribeElasticGpusResultDeserializer { ) } } -///

Contains the parameters for DescribeExportTasks.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeExportImageTasksRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ pub dry_run: Option<bool>,
+ /// The IDs of the export image tasks.
+ pub export_image_task_ids: Option<Vec<String>>,
+ /// Filter tasks using the task-state filter and one of the following values: active, completed, deleting, or deleted.
+ pub filters: Option<Vec<Filter>>,
+ /// The maximum number of results to return in a single call.
+ pub max_results: Option<i64>,
+ /// A token that indicates the next page of results.

+ pub next_token: Option, +} + +/// Serialize `DescribeExportImageTasksRequest` contents to a `SignedRequest`. +struct DescribeExportImageTasksRequestSerializer; +impl DescribeExportImageTasksRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeExportImageTasksRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.export_image_task_ids { + ExportImageTaskIdListSerializer::serialize( + params, + &format!("{}{}", prefix, "ExportImageTaskId"), + field_value, + ); + } + if let Some(ref field_value) = obj.filters { + FilterListSerializer::serialize( + params, + &format!("{}{}", prefix, "Filter"), + field_value, + ); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeExportImageTasksResult { + ///

Information about the export image tasks.
+ pub export_image_tasks: Option<Vec<ExportImageTask>>,
+ /// The token to use to get the next page of results. This value is null when there are no more results to return.

+ pub next_token: Option, +} + +struct DescribeExportImageTasksResultDeserializer; +impl DescribeExportImageTasksResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DescribeExportImageTasksResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "exportImageTaskSet" => { + obj.export_image_tasks.get_or_insert(vec![]).extend( + ExportImageTaskListDeserializer::deserialize( + "exportImageTaskSet", + stack, + )?, + ); + } + "nextToken" => { + obj.next_token = + Some(NextTokenDeserializer::deserialize("nextToken", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeExportTasksRequest { ///

The export task IDs.

@@ -12845,7 +13705,6 @@ impl DescribeExportTasksRequestSerializer { } } -///

Contains the output for DescribeExportTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeExportTasksResult {
/// Information about the export tasks.

@@ -13980,7 +14839,7 @@ pub struct DescribeImagesRequest { pub dry_run: Option, ///

Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

pub executable_users: Option<Vec<String>>,
- /// The filters.

  • architecture - The image architecture (i386 | x86_64).

  • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

  • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

  • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 | st1 | sc1 | standard).

  • block-device-mapping.encrypted - A Boolean that indicates whether the EBS volume is encrypted.

  • description - The description of the image (provided during image creation).

  • ena-support - A Boolean that indicates whether enhanced networking with ENA is enabled.

  • hypervisor - The hypervisor type (ovm | xen).

  • image-id - The ID of the image.

  • image-type - The image type (machine | kernel | ramdisk).

  • is-public - A Boolean that indicates whether the image is public.

  • kernel-id - The kernel ID.

  • manifest-location - The location of the image manifest.

  • name - The name of the AMI (provided during image creation).

  • owner-alias - String value from an Amazon-maintained list (amazon | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

  • owner-id - The AWS account ID of the image owner.

  • platform - The platform. To only list Windows-based AMIs, use windows.

  • product-code - The product code.

  • product-code.type - The type of the product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • state - The state of the image (available | pending | failed).

  • state-reason-code - The reason code for the state change.

  • state-reason-message - The message for the state change.

  • sriov-net-support - A value of simple indicates that enhanced networking with the Intel 82599 VF interface is enabled.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • virtualization-type - The virtualization type (paravirtual | hvm).

+ /// The filters.

  • architecture - The image architecture (i386 | x86_64 | arm64).

  • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

  • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

  • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 | st1 | sc1 | standard).

  • block-device-mapping.encrypted - A Boolean that indicates whether the EBS volume is encrypted.

  • description - The description of the image (provided during image creation).

  • ena-support - A Boolean that indicates whether enhanced networking with ENA is enabled.

  • hypervisor - The hypervisor type (ovm | xen).

  • image-id - The ID of the image.

  • image-type - The image type (machine | kernel | ramdisk).

  • is-public - A Boolean that indicates whether the image is public.

  • kernel-id - The kernel ID.

  • manifest-location - The location of the image manifest.

  • name - The name of the AMI (provided during image creation).

  • owner-alias - String value from an Amazon-maintained list (amazon | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

  • owner-id - The AWS account ID of the image owner.

  • platform - The platform. To only list Windows-based AMIs, use windows.

  • product-code - The product code.

  • product-code.type - The type of the product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • state - The state of the image (available | pending | failed).

  • state-reason-code - The reason code for the state change.

  • state-reason-message - The message for the state change.

  • sriov-net-support - A value of simple indicates that enhanced networking with the Intel 82599 VF interface is enabled.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • virtualization-type - The virtualization type (paravirtual | hvm).

pub filters: Option<Vec<Filter>>,
/// The image IDs. Default: Describes all images available to you.

pub image_ids: Option>, @@ -14057,16 +14916,15 @@ impl DescribeImagesResultDeserializer { }) } } -///

Contains the parameters for DescribeImportImageTasks.

#[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeImportImageTasksRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option<bool>,
- /// Filter tasks using the task-state filter and one of the following values: active, completed, deleting, deleted.
+ /// Filter tasks using the task-state filter and one of the following values: active, completed, deleting, or deleted.
pub filters: Option<Vec<Filter>>,
- /// A list of import image task IDs.
+ /// The IDs of the import image tasks.
pub import_task_ids: Option<Vec<String>>,
- /// The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.
+ /// The maximum number of results to return in a single call.
pub max_results: Option<i64>,
/// A token that indicates the next page of results.

pub next_token: Option, @@ -14107,7 +14965,6 @@ impl DescribeImportImageTasksRequestSerializer { } } -///

Contains the output for DescribeImportImageTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeImportImageTasksResult {
/// A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

@@ -14146,7 +15003,6 @@ impl DescribeImportImageTasksResultDeserializer { ) } } -///

Contains the parameters for DescribeImportSnapshotTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeImportSnapshotTasksRequest {
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

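The DryRun flag described above follows one convention across all of these requests: set it, expect an error, and treat DryRunOperation as success. A sketch, assuming the generated client method name and that the error's Display output includes the AWS error code:

let probe = DescribeImportSnapshotTasksRequest {
    dry_run: Some(true),
    ..Default::default()
};
match client.describe_import_snapshot_tasks(probe).sync() {
    // A DryRunOperation error means the caller holds the required permissions.
    Err(e) if e.to_string().contains("DryRunOperation") => println!("caller is authorized"),
    Err(e) => println!("not authorized, or another failure: {}", e),
    Ok(_) => println!("unexpected success for a dry run"),
}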
@@ -14196,7 +15052,6 @@ impl DescribeImportSnapshotTasksRequestSerializer { } } -///

Contains the output for DescribeImportSnapshotTasks.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DescribeImportSnapshotTasksResult {
/// A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

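NextToken/MaxResults pagination for these Describe calls follows the usual loop: pass each returned token back until it comes back as None. A sketch; the max_results, next_token, and import_snapshot_tasks field names are assumed to mirror the API members shown above:

let mut next_token: Option<String> = None;
loop {
    let page = client
        .describe_import_snapshot_tasks(DescribeImportSnapshotTasksRequest {
            max_results: Some(100),
            next_token: next_token.clone(),
            ..Default::default()
        })
        .sync()
        .expect("describe_import_snapshot_tasks failed");
    for task in page.import_snapshot_tasks.unwrap_or_default() {
        println!("{:?}", task);
    }
    next_token = page.next_token;
    if next_token.is_none() {
        break; // no more pages
    }
}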
@@ -14452,7 +15307,7 @@ impl DescribeInstanceStatusResultDeserializer { pub struct DescribeInstancesRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option<bool>,
- /// The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen).

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • owner-id - The AWS account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

+ ///

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen).

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • owner-id - The AWS account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

pub filters: Option<Vec<Filter>>, ///

The instance IDs.

Default: Describes all your instances.

pub instance_ids: Option<Vec<String>>, @@ -15324,7 +16179,7 @@ impl DescribeNetworkInterfacePermissionsResultDeserializer { pub struct DescribeNetworkInterfacesRequest { ///
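The filter names above correspond one-to-one with the `Filter` entries placed on the request. As a minimal sketch (assuming the `rusoto_ec2` crate from this era of the library, with the blocking `.sync()` call on the returned `RusotoFuture`), describing only running instances might look like:

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeInstancesRequest, Ec2, Ec2Client, Filter};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeInstancesRequest {
        // Server-side filtering with one of the documented filter names.
        filters: Some(vec![Filter {
            name: Some("instance-state-name".to_owned()),
            values: Some(vec!["running".to_owned()]),
        }]),
        ..Default::default()
    };
    match client.describe_instances(request).sync() {
        Ok(result) => {
            for reservation in result.reservations.unwrap_or_default() {
                for instance in reservation.instances.unwrap_or_default() {
                    println!("{:?}", instance.instance_id);
                }
            }
        }
        Err(e) => eprintln!("DescribeInstances failed: {}", e),
    }
}
```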

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach.time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The AWS account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance, the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

+ ///

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The AWS account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance, the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

pub filters: Option<Vec<Filter>>, ///

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

pub max_results: Option<i64>, @@ -15711,11 +16566,13 @@ impl DescribePublicIpv4PoolsResultDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeRegionsRequest { + ///

Indicates whether to display all Regions, including Regions that are disabled for your account.

+ pub all_regions: Option<bool>, ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

The filters.

  • endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com).

  • region-name - The name of the Region (for example, us-east-1).

+ ///

The filters.

  • endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com).

  • opt-in-status - The opt-in status of the Region (opt-in-not-required | opted-in | not-opted-in).

  • region-name - The name of the Region (for example, us-east-1).

pub filters: Option<Vec<Filter>>, - ///

The names of the Regions.

+ ///

The names of the Regions. You can specify any Regions, whether they are enabled or disabled for your account.

pub region_names: Option<Vec<String>>, } @@ -15728,6 +16585,9 @@ impl DescribeRegionsRequestSerializer { prefix.push_str("."); } + if let Some(ref field_value) = obj.all_regions { + params.put(&format!("{}{}", prefix, "AllRegions"), &field_value); + } if let Some(ref field_value) = obj.dry_run { params.put(&format!("{}{}", prefix, "DryRun"), &field_value); } @@ -17527,23 +18387,23 @@ impl DescribeTagsResultDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] -pub struct DescribeTransitGatewayAttachmentsRequest { +pub struct DescribeTrafficMirrorFiltersRequest { ///
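For illustration, the new `AllRegions` parameter serialized above surfaces as the `all_regions` field on the request. A hedged sketch of listing every Region, including ones disabled for the account:

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeRegionsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeRegionsRequest {
        // Serialized as the AllRegions query parameter shown in the hunk above.
        all_regions: Some(true),
        ..Default::default()
    };
    if let Ok(result) = client.describe_regions(request).sync() {
        for region in result.regions.unwrap_or_default() {
            println!("{:?} -> {:?}", region.region_name, region.endpoint);
        }
    }
}
```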

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

One or more filters. The possible values are:

  • association.state - The state of the association (associating | associated | disassociating).

  • association.transit-gateway-route-table-id - The ID of the route table for the transit gateway.

  • resource-id - The ID of the resource.

  • resource-owner-id - The ID of the AWS account that owns the resource.

  • resource-type - The resource type (vpc | vpn).

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-attachment-id - The ID of the attachment.

  • transit-gateway-id - The ID of the transit gateway.

  • transit-gateway-owner-id - The ID of the AWS account that owns the transit gateway.

+ ///

One or more filters. The possible values are:

  • description: The Traffic Mirror filter description.

  • traffic-mirror-filter-id: The ID of the Traffic Mirror filter.

pub filters: Option<Vec<Filter>>, ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

pub max_results: Option<i64>, ///

The token for the next page of results.

pub next_token: Option<String>, - ///

The IDs of the attachments.

- pub transit_gateway_attachment_ids: Option<Vec<String>>, + ///

The IDs of the Traffic Mirror filters.

+ pub traffic_mirror_filter_ids: Option>, } -/// Serialize `DescribeTransitGatewayAttachmentsRequest` contents to a `SignedRequest`. -struct DescribeTransitGatewayAttachmentsRequestSerializer; -impl DescribeTransitGatewayAttachmentsRequestSerializer { - fn serialize(params: &mut Params, name: &str, obj: &DescribeTransitGatewayAttachmentsRequest) { +/// Serialize `DescribeTrafficMirrorFiltersRequest` contents to a `SignedRequest`. +struct DescribeTrafficMirrorFiltersRequestSerializer; +impl DescribeTrafficMirrorFiltersRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeTrafficMirrorFiltersRequest) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); @@ -17565,10 +18425,10 @@ impl DescribeTransitGatewayAttachmentsRequestSerializer { if let Some(ref field_value) = obj.next_token { params.put(&format!("{}{}", prefix, "NextToken"), &field_value); } - if let Some(ref field_value) = obj.transit_gateway_attachment_ids { - TransitGatewayAttachmentIdStringListSerializer::serialize( + if let Some(ref field_value) = obj.traffic_mirror_filter_ids { + ValueStringListSerializer::serialize( params, - &format!("{}{}", prefix, "TransitGatewayAttachmentIds"), + &format!("{}{}", prefix, "TrafficMirrorFilterId"), field_value, ); } @@ -17576,21 +18436,21 @@ impl DescribeTransitGatewayAttachmentsRequestSerializer { } #[derive(Default, Debug, Clone, PartialEq)] -pub struct DescribeTransitGatewayAttachmentsResult { - ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

+pub struct DescribeTrafficMirrorFiltersResult { + ///

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

pub next_token: Option, - ///

Information about the attachments.

- pub transit_gateway_attachments: Option>, + ///

Information about one or more Traffic Mirror filters.

+ pub traffic_mirror_filters: Option>, } -struct DescribeTransitGatewayAttachmentsResultDeserializer; -impl DescribeTransitGatewayAttachmentsResultDeserializer { +struct DescribeTrafficMirrorFiltersResultDeserializer; +impl DescribeTrafficMirrorFiltersResultDeserializer { #[allow(unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, - ) -> Result { - deserialize_elements::<_, DescribeTransitGatewayAttachmentsResult, _>( + ) -> Result { + deserialize_elements::<_, DescribeTrafficMirrorFiltersResult, _>( tag_name, stack, |name, stack, obj| { @@ -17598,13 +18458,13 @@ impl DescribeTransitGatewayAttachmentsResultDeserializer { "nextToken" => { obj.next_token = Some(StringDeserializer::deserialize("nextToken", stack)?); } - "transitGatewayAttachments" => { - obj.transit_gateway_attachments - .get_or_insert(vec![]) - .extend(TransitGatewayAttachmentListDeserializer::deserialize( - "transitGatewayAttachments", + "trafficMirrorFilterSet" => { + obj.traffic_mirror_filters.get_or_insert(vec![]).extend( + TrafficMirrorFilterSetDeserializer::deserialize( + "trafficMirrorFilterSet", stack, - )?); + )?, + ); } _ => skip_tree(stack), } @@ -17614,23 +18474,284 @@ impl DescribeTransitGatewayAttachmentsResultDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] -pub struct DescribeTransitGatewayRouteTablesRequest { +pub struct DescribeTrafficMirrorSessionsRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

One or more filters. The possible values are:

  • default-association-route-table - Indicates whether this is the default association route table for the transit gateway (true | false).

  • default-propagation-route-table - Indicates whether this is the default propagation route table for the transit gateway (true | false).

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-id - The ID of the transit gateway.

  • transit-gateway-route-table-id - The ID of the transit gateway route table.

+ ///

One or more filters. The possible values are:

  • description: The Traffic Mirror session description.

  • network-interface-id: The ID of the Traffic Mirror session network interface.

  • owner-id: The ID of the account that owns the Traffic Mirror session.

  • packet-length: The assigned number of packets to mirror.

  • session-number: The assigned session number.

  • traffic-mirror-filter-id: The ID of the Traffic Mirror filter.

  • traffic-mirror-session-id: The ID of the Traffic Mirror session.

  • traffic-mirror-target-id: The ID of the Traffic Mirror target.

  • virtual-network-id: The virtual network ID of the Traffic Mirror session.

pub filters: Option<Vec<Filter>>, ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

pub max_results: Option<i64>, ///

The token for the next page of results.

pub next_token: Option<String>, - ///

The IDs of the transit gateway route tables.

- pub transit_gateway_route_table_ids: Option<Vec<String>>, + ///

The IDs of the Traffic Mirror sessions.

+ pub traffic_mirror_session_ids: Option>, } -/// Serialize `DescribeTransitGatewayRouteTablesRequest` contents to a `SignedRequest`. -struct DescribeTransitGatewayRouteTablesRequestSerializer; -impl DescribeTransitGatewayRouteTablesRequestSerializer { - fn serialize(params: &mut Params, name: &str, obj: &DescribeTransitGatewayRouteTablesRequest) { +/// Serialize `DescribeTrafficMirrorSessionsRequest` contents to a `SignedRequest`. +struct DescribeTrafficMirrorSessionsRequestSerializer; +impl DescribeTrafficMirrorSessionsRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeTrafficMirrorSessionsRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.filters { + FilterListSerializer::serialize( + params, + &format!("{}{}", prefix, "Filter"), + field_value, + ); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + if let Some(ref field_value) = obj.traffic_mirror_session_ids { + ValueStringListSerializer::serialize( + params, + &format!("{}{}", prefix, "TrafficMirrorSessionId"), + field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTrafficMirrorSessionsResult { + ///

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

+ pub next_token: Option<String>, + ///

Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror sessions are described. Alternatively, you can filter the results.

+ pub traffic_mirror_sessions: Option>, +} + +struct DescribeTrafficMirrorSessionsResultDeserializer; +impl DescribeTrafficMirrorSessionsResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DescribeTrafficMirrorSessionsResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "nextToken" => { + obj.next_token = Some(StringDeserializer::deserialize("nextToken", stack)?); + } + "trafficMirrorSessionSet" => { + obj.traffic_mirror_sessions.get_or_insert(vec![]).extend( + TrafficMirrorSessionSetDeserializer::deserialize( + "trafficMirrorSessionSet", + stack, + )?, + ); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTrafficMirrorTargetsRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option<bool>, + ///

One or more filters. The possible values are:

  • description: The Traffic Mirror target description.

  • network-interface-id: The ID of the Traffic Mirror session network interface.

  • network-load-balancer-arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the session.

  • owner-id: The ID of the account that owns the Traffic Mirror session.

  • traffic-mirror-target-id: The ID of the Traffic Mirror target.

+ pub filters: Option<Vec<Filter>>, + ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

+ pub max_results: Option<i64>, + ///

The token for the next page of results.

+ pub next_token: Option<String>, + ///

The IDs of the Traffic Mirror targets.

+ pub traffic_mirror_target_ids: Option>, +} + +/// Serialize `DescribeTrafficMirrorTargetsRequest` contents to a `SignedRequest`. +struct DescribeTrafficMirrorTargetsRequestSerializer; +impl DescribeTrafficMirrorTargetsRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeTrafficMirrorTargetsRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.filters { + FilterListSerializer::serialize( + params, + &format!("{}{}", prefix, "Filter"), + field_value, + ); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + if let Some(ref field_value) = obj.traffic_mirror_target_ids { + ValueStringListSerializer::serialize( + params, + &format!("{}{}", prefix, "TrafficMirrorTargetId"), + field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTrafficMirrorTargetsResult { + ///

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

+ pub next_token: Option<String>, + ///

Information about one or more Traffic Mirror targets.

+ pub traffic_mirror_targets: Option>, +} + +struct DescribeTrafficMirrorTargetsResultDeserializer; +impl DescribeTrafficMirrorTargetsResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DescribeTrafficMirrorTargetsResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "nextToken" => { + obj.next_token = Some(StringDeserializer::deserialize("nextToken", stack)?); + } + "trafficMirrorTargetSet" => { + obj.traffic_mirror_targets.get_or_insert(vec![]).extend( + TrafficMirrorTargetSetDeserializer::deserialize( + "trafficMirrorTargetSet", + stack, + )?, + ); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTransitGatewayAttachmentsRequest { + ///
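All three of the new Traffic Mirror Describe* operations share the `nextToken`/`maxResults` pagination contract documented above. A sketch of draining the pages (names follow the generated code in this diff; the loop itself is illustrative, not part of the library):

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeTrafficMirrorSessionsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeTrafficMirrorSessionsRequest {
            max_results: Some(100),
            next_token: next_token.clone(),
            ..Default::default()
        };
        let page = client
            .describe_traffic_mirror_sessions(request)
            .sync()
            .expect("DescribeTrafficMirrorSessions failed");
        for session in page.traffic_mirror_sessions.unwrap_or_default() {
            println!("{:?}", session.traffic_mirror_session_id);
        }
        // The deserializer leaves next_token as None on the last page.
        match page.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
}
```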

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option<bool>, + ///

One or more filters. The possible values are:

  • association.state - The state of the association (associating | associated | disassociating).

  • association.transit-gateway-route-table-id - The ID of the route table for the transit gateway.

  • resource-id - The ID of the resource.

  • resource-owner-id - The ID of the AWS account that owns the resource.

  • resource-type - The resource type (vpc | vpn).

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-attachment-id - The ID of the attachment.

  • transit-gateway-id - The ID of the transit gateway.

  • transit-gateway-owner-id - The ID of the AWS account that owns the transit gateway.

+ pub filters: Option<Vec<Filter>>, + ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

+ pub max_results: Option<i64>, + ///

The token for the next page of results.

+ pub next_token: Option<String>, + ///

The IDs of the attachments.

+ pub transit_gateway_attachment_ids: Option>, +} + +/// Serialize `DescribeTransitGatewayAttachmentsRequest` contents to a `SignedRequest`. +struct DescribeTransitGatewayAttachmentsRequestSerializer; +impl DescribeTransitGatewayAttachmentsRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeTransitGatewayAttachmentsRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.filters { + FilterListSerializer::serialize( + params, + &format!("{}{}", prefix, "Filter"), + field_value, + ); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + if let Some(ref field_value) = obj.transit_gateway_attachment_ids { + TransitGatewayAttachmentIdStringListSerializer::serialize( + params, + &format!("{}{}", prefix, "TransitGatewayAttachmentIds"), + field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTransitGatewayAttachmentsResult { + ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

+ pub next_token: Option<String>, + ///

Information about the attachments.

+ pub transit_gateway_attachments: Option>, +} + +struct DescribeTransitGatewayAttachmentsResultDeserializer; +impl DescribeTransitGatewayAttachmentsResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, DescribeTransitGatewayAttachmentsResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "nextToken" => { + obj.next_token = Some(StringDeserializer::deserialize("nextToken", stack)?); + } + "transitGatewayAttachments" => { + obj.transit_gateway_attachments + .get_or_insert(vec![]) + .extend(TransitGatewayAttachmentListDeserializer::deserialize( + "transitGatewayAttachments", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct DescribeTransitGatewayRouteTablesRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option<bool>, + ///

One or more filters. The possible values are:

  • default-association-route-table - Indicates whether this is the default association route table for the transit gateway (true | false).

  • default-propagation-route-table - Indicates whether this is the default propagation route table for the transit gateway (true | false).

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-id - The ID of the transit gateway.

  • transit-gateway-route-table-id - The ID of the transit gateway route table.

+ pub filters: Option<Vec<Filter>>, + ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

+ pub max_results: Option<i64>, + ///

The token for the next page of results.

+ pub next_token: Option<String>, + ///

The IDs of the transit gateway route tables.

+ pub transit_gateway_route_table_ids: Option>, +} + +/// Serialize `DescribeTransitGatewayRouteTablesRequest` contents to a `SignedRequest`. +struct DescribeTransitGatewayRouteTablesRequestSerializer; +impl DescribeTransitGatewayRouteTablesRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &DescribeTransitGatewayRouteTablesRequest) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); @@ -18274,6 +19395,17 @@ impl DescribeVpcAttributeResultDeserializer { ) } } +struct DescribeVpcClassicLinkDnsSupportNextTokenDeserializer; +impl DescribeVpcClassicLinkDnsSupportNextTokenDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeVpcClassicLinkDnsSupportRequest { ///

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

@@ -18330,8 +19462,12 @@ impl DescribeVpcClassicLinkDnsSupportResultDeserializer { |name, stack, obj| { match name { "nextToken" => { - obj.next_token = - Some(NextTokenDeserializer::deserialize("nextToken", stack)?); + obj.next_token = Some( + DescribeVpcClassicLinkDnsSupportNextTokenDeserializer::deserialize( + "nextToken", + stack, + )?, + ); } "vpcs" => { obj.vpcs.get_or_insert(vec![]).extend( @@ -18856,7 +19992,7 @@ impl DescribeVpcEndpointServicesResultDeserializer { pub struct DescribeVpcEndpointsRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

One or more filters.

  • service-name: The name of the service.

  • vpc-id: The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id: The ID of the endpoint.

  • vpc-endpoint-state: The state of the endpoint. (pending | available | deleting | deleted)

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

+ ///

One or more filters.

  • service-name: The name of the service.

  • vpc-id: The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id: The ID of the endpoint.

  • vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | available | deleting | deleted | rejected | failed).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

pub filters: Option<Vec<Filter>>, ///

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

Constraint: If the value is greater than 1000, we return only 1000 items.

pub max_results: Option<i64>, @@ -19338,7 +20474,7 @@ pub struct DetachNetworkInterfaceRequest { pub attachment_id: String, ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option<bool>, - ///

Specifies whether to force a detachment.

+ ///

Specifies whether to force a detachment.

  • Use the Force parameter only as a last resort to detach a network interface from a failed instance.

  • If you use the Force parameter to detach a network interface, you might not be able to attach a different network interface to the same index on the instance without first stopping and starting the instance.

  • If you force the detachment of a network interface, the instance metadata might not get updated. This means that the attributes associated with the detached network interface might still be visible. The instance metadata will get updated when you stop and start the instance.

pub force: Option<bool>, } @@ -19661,7 +20797,7 @@ impl DisableEbsEncryptionByDefaultRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct DisableEbsEncryptionByDefaultResult { - ///
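Given the caveats the expanded documentation adds for `Force`, a hedged usage sketch (the attachment ID is hypothetical) might be:

```rust
use rusoto_core::Region;
use rusoto_ec2::{DetachNetworkInterfaceRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DetachNetworkInterfaceRequest {
        attachment_id: "eni-attach-0123456789abcdef0".to_owned(), // hypothetical ID
        // Last resort only: instance metadata may stay stale until a stop/start.
        force: Some(true),
        ..Default::default()
    };
    if let Err(e) = client.detach_network_interface(request).sync() {
        eprintln!("DetachNetworkInterface failed: {}", e);
    }
}
```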

Account-level encryption status after performing the action.

+ ///

The updated status of encryption by default.

pub ebs_encryption_by_default: Option<bool>, } @@ -20544,17 +21680,17 @@ impl DoubleDeserializer { pub struct EbsBlockDevice { ///

Indicates whether the EBS volume is deleted on instance termination.

pub delete_on_termination: Option<bool>, - ///

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The default effect of setting the Encrypted parameter to true through the console, API, or CLI depends on the volume's origin (new or from a snapshot), starting encryption state, ownership, and whether account-level encryption is enabled. Each default case can be overridden by specifying a customer master key (CMK) with the KmsKeyId parameter in addition to setting Encrypted to true. For a complete list of possible encryption cases, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.

+ ///

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.

pub encrypted: Option<bool>, ///

The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100-64,000 IOPS for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

pub iops: Option<i64>, - ///

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

+ ///

Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

pub kms_key_id: Option<String>, ///

The ID of the snapshot.

pub snapshot_id: Option<String>, ///

The size of the volume, in GiB.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

pub volume_size: Option<i64>, - ///

The volume type. If you set the type to io1, you must also set the Iops property.

Default: standard

+ ///

The volume type. If you set the type to io1, you must also specify the IOPS that the volume supports.

Default: gp2

pub volume_type: Option<String>, } @@ -21222,7 +22358,7 @@ impl EnableEbsEncryptionByDefaultRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct EnableEbsEncryptionByDefaultResult { - ///
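Pulling the `EbsBlockDevice` fields above together, a sketch of a block device mapping for `RunInstances` (the device name and CMK alias are hypothetical):

```rust
use rusoto_ec2::{BlockDeviceMapping, EbsBlockDevice};

// An encrypted 100 GiB gp2 volume under a customer managed CMK,
// deleted when the instance terminates.
fn encrypted_root_volume() -> BlockDeviceMapping {
    BlockDeviceMapping {
        device_name: Some("/dev/sda1".to_owned()),
        ebs: Some(EbsBlockDevice {
            delete_on_termination: Some(true),
            encrypted: Some(true),
            kms_key_id: Some("alias/my-ebs-key".to_owned()), // hypothetical CMK alias
            volume_size: Some(100),
            volume_type: Some("gp2".to_owned()),
            ..Default::default()
        }),
        ..Default::default()
    }
}

fn main() {
    println!("{:?}", encrypted_root_volume());
}
```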

Account-level encryption status after performing the action.

+ ///

The updated status of encryption by default.

pub ebs_encryption_by_default: Option<bool>, } @@ -21721,6 +22857,219 @@ impl ExportEnvironmentDeserializer { Ok(obj) } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ExportImageRequest { + ///

Token to enable idempotency for export image requests.

+ pub client_token: Option<String>, + ///

A description of the image being exported. The maximum length is 255 bytes.

+ pub description: Option<String>, + ///

The disk image format.

+ pub disk_image_format: String, + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option<bool>, + ///

The ID of the image.

+ pub image_id: String, + ///

The name of the role that grants VM Import/Export permission to export images to your S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.

+ pub role_name: Option<String>, + ///

Information about the destination S3 bucket. The bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

+ pub s3_export_location: ExportTaskS3LocationRequest, +} + +/// Serialize `ExportImageRequest` contents to a `SignedRequest`. +struct ExportImageRequestSerializer; +impl ExportImageRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ExportImageRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.client_token { + params.put(&format!("{}{}", prefix, "ClientToken"), &field_value); + } + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + params.put( + &format!("{}{}", prefix, "DiskImageFormat"), + &obj.disk_image_format, + ); + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put(&format!("{}{}", prefix, "ImageId"), &obj.image_id); + if let Some(ref field_value) = obj.role_name { + params.put(&format!("{}{}", prefix, "RoleName"), &field_value); + } + ExportTaskS3LocationRequestSerializer::serialize( + params, + &format!("{}{}", prefix, "S3ExportLocation"), + &obj.s3_export_location, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ExportImageResult { + ///
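A hedged end-to-end sketch of the new `ExportImage` operation (AMI ID and bucket name are hypothetical; the bucket must grant the permissions noted above):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ExportImageRequest, ExportTaskS3LocationRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ExportImageRequest {
        disk_image_format: "VMDK".to_owned(),
        image_id: "ami-0123456789abcdef0".to_owned(), // hypothetical AMI
        s3_export_location: ExportTaskS3LocationRequest {
            s3_bucket: "my-export-bucket".to_owned(), // hypothetical bucket
            s3_prefix: Some("exports/".to_owned()),
        },
        ..Default::default()
    };
    match client.export_image(request).sync() {
        Ok(result) => println!("started task: {:?}", result.export_image_task_id),
        Err(e) => eprintln!("ExportImage failed: {}", e),
    }
}
```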

A description of the image being exported.

+ pub description: Option<String>, + ///

The disk image format for the exported image.

+ pub disk_image_format: Option<String>, + ///

The ID of the export image task.

+ pub export_image_task_id: Option<String>, + ///

The ID of the image.

+ pub image_id: Option<String>, + ///

The percent complete of the export image task.

+ pub progress: Option<String>, + ///

The name of the role that grants VM Import/Export permission to export images to your S3 bucket.

+ pub role_name: Option, + ///

Information about the destination S3 bucket.

+ pub s3_export_location: Option, + ///

The status of the export image task. The possible values are active, completed, deleting, and deleted.

+ pub status: Option, + ///

The status message for the export image task.

+ pub status_message: Option, +} + +struct ExportImageResultDeserializer; +impl ExportImageResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ExportImageResult, _>(tag_name, stack, |name, stack, obj| { + match name { + "description" => { + obj.description = Some(StringDeserializer::deserialize("description", stack)?); + } + "diskImageFormat" => { + obj.disk_image_format = Some(DiskImageFormatDeserializer::deserialize( + "diskImageFormat", + stack, + )?); + } + "exportImageTaskId" => { + obj.export_image_task_id = + Some(StringDeserializer::deserialize("exportImageTaskId", stack)?); + } + "imageId" => { + obj.image_id = Some(StringDeserializer::deserialize("imageId", stack)?); + } + "progress" => { + obj.progress = Some(StringDeserializer::deserialize("progress", stack)?); + } + "roleName" => { + obj.role_name = Some(StringDeserializer::deserialize("roleName", stack)?); + } + "s3ExportLocation" => { + obj.s3_export_location = Some(ExportTaskS3LocationDeserializer::deserialize( + "s3ExportLocation", + stack, + )?); + } + "status" => { + obj.status = Some(StringDeserializer::deserialize("status", stack)?); + } + "statusMessage" => { + obj.status_message = + Some(StringDeserializer::deserialize("statusMessage", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +///

Describes an export image task.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct ExportImageTask { + ///

A description of the image being exported.

+ pub description: Option, + ///

The ID of the export image task.

+ pub export_image_task_id: Option, + ///

The ID of the image.

+ pub image_id: Option, + ///

The percent complete of the export image task.

+ pub progress: Option, + ///

Information about the destination S3 bucket.

+ pub s3_export_location: Option, + ///

The status of the export image task. The possible values are active, completed, deleting, and deleted.

+ pub status: Option, + ///

The status message for the export image task.

+ pub status_message: Option, +} + +struct ExportImageTaskDeserializer; +impl ExportImageTaskDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ExportImageTask, _>(tag_name, stack, |name, stack, obj| { + match name { + "description" => { + obj.description = Some(StringDeserializer::deserialize("description", stack)?); + } + "exportImageTaskId" => { + obj.export_image_task_id = + Some(StringDeserializer::deserialize("exportImageTaskId", stack)?); + } + "imageId" => { + obj.image_id = Some(StringDeserializer::deserialize("imageId", stack)?); + } + "progress" => { + obj.progress = Some(StringDeserializer::deserialize("progress", stack)?); + } + "s3ExportLocation" => { + obj.s3_export_location = Some(ExportTaskS3LocationDeserializer::deserialize( + "s3ExportLocation", + stack, + )?); + } + "status" => { + obj.status = Some(StringDeserializer::deserialize("status", stack)?); + } + "statusMessage" => { + obj.status_message = + Some(StringDeserializer::deserialize("statusMessage", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} + +/// Serialize `ExportImageTaskIdList` contents to a `SignedRequest`. +struct ExportImageTaskIdListSerializer; +impl ExportImageTaskIdListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + params.put(&key, &obj); + } + } +} + +struct ExportImageTaskListDeserializer; +impl ExportImageTaskListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(ExportImageTaskDeserializer::deserialize("item", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} ///

Describes an instance export task.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ExportTask { @@ -21807,6 +23156,61 @@ impl ExportTaskListDeserializer { }) } } +///

Describes the destination for an export image task.

+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct ExportTaskS3Location {
+    /// The destination S3 bucket.
+    pub s3_bucket: Option<String>,
+    /// The prefix (logical hierarchy) in the bucket.
+    pub s3_prefix: Option<String>,
+}
+
+struct ExportTaskS3LocationDeserializer;
+impl ExportTaskS3LocationDeserializer {
+    #[allow(unused_variables)]
+    fn deserialize<T: Peek + Next>(
+        tag_name: &str,
+        stack: &mut T,
+    ) -> Result<ExportTaskS3Location, XmlParseError> {
+        deserialize_elements::<_, ExportTaskS3Location, _>(tag_name, stack, |name, stack, obj| {
+            match name {
+                "s3Bucket" => {
+                    obj.s3_bucket = Some(StringDeserializer::deserialize("s3Bucket", stack)?);
+                }
+                "s3Prefix" => {
+                    obj.s3_prefix = Some(StringDeserializer::deserialize("s3Prefix", stack)?);
+                }
+                _ => skip_tree(stack),
+            }
+            Ok(())
+        })
+    }
+}
+/// Describes the destination for an export image task.
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct ExportTaskS3LocationRequest {
+    /// The destination S3 bucket.
+    pub s3_bucket: String,
+    /// The prefix (logical hierarchy) in the bucket.
+    pub s3_prefix: Option<String>,
+}
+
+/// Serialize `ExportTaskS3LocationRequest` contents to a `SignedRequest`.
+struct ExportTaskS3LocationRequestSerializer;
+impl ExportTaskS3LocationRequestSerializer {
+    fn serialize(params: &mut Params, name: &str, obj: &ExportTaskS3LocationRequest) {
+        let mut prefix = name.to_string();
+        if prefix != "" {
+            prefix.push_str(".");
+        }
+
+        params.put(&format!("{}{}", prefix, "S3Bucket"), &obj.s3_bucket);
+        if let Some(ref field_value) = obj.s3_prefix {
+            params.put(&format!("{}{}", prefix, "S3Prefix"), &field_value);
+        }
+    }
+}
+
 struct ExportTaskStateDeserializer;
 impl ExportTaskStateDeserializer {
     #[allow(unused_variables)]
@@ -21905,7 +23309,7 @@ impl ExportToS3TaskSpecificationSerializer {
 pub struct ExportTransitGatewayRoutesRequest {
     /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
pub dry_run: Option, - ///

One or more filters. The possible values are:

  • attachment.transit-gateway-attachment-id- The id of the transit gateway attachment.

  • attachment.resource-id - The resource id of the transit gateway attachment.

  • route-search.exact-match - The exact match of the specified filter.

  • route-search.longest-prefix-match - The longest prefix that matches the route.

  • route-search.subnet-of-match - The routes with a subnet that match the specified CIDR filter.

  • route-search.supernet-of-match - The routes with a CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29.

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-route-destination-cidr-block - The CIDR range.

  • type - The type of roue (active | blackhole).

+ ///

One or more filters. The possible values are:

  • attachment.transit-gateway-attachment-id - The id of the transit gateway attachment.

  • attachment.resource-id - The resource id of the transit gateway attachment.

  • route-search.exact-match - The exact match of the specified filter.

  • route-search.longest-prefix-match - The longest prefix that matches the route.

  • route-search.subnet-of-match - The routes with a subnet that match the specified CIDR filter.

  • route-search.supernet-of-match - The routes with a CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29.

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-route-destination-cidr-block - The CIDR range.

  • type - The type of route (active | blackhole).

pub filters: Option>, ///

The name of the S3 bucket.

pub s3_bucket: String, @@ -22568,7 +23972,7 @@ pub struct FleetLaunchTemplateSpecificationRequest { pub launch_template_id: Option, ///

The name of the launch template.

pub launch_template_name: Option, - ///

The version number of the launch template.

+ ///

The version number of the launch template. Note: This is a required parameter and will be updated soon.

pub version: Option, } @@ -22673,6 +24077,8 @@ pub struct FlowLog { pub log_destination: Option, ///

Specifies the type of destination to which the flow log data is published. Flow log data can be published to CloudWatch Logs or Amazon S3.

pub log_destination_type: Option, + ///

The format of the flow log record.

+ pub log_format: Option, ///

The name of the flow log group.

pub log_group_name: Option, ///

The ID of the resource on which the flow log was created.

@@ -22727,6 +24133,9 @@ impl FlowLogDeserializer { stack, )?); } + "logFormat" => { + obj.log_format = Some(StringDeserializer::deserialize("logFormat", stack)?); + } "logGroupName" => { obj.log_group_name = Some(StringDeserializer::deserialize("logGroupName", stack)?); @@ -22998,6 +24407,115 @@ impl GatewayTypeDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] +pub struct GetCapacityReservationUsageRequest { + ///

The ID of the Capacity Reservation.

+ pub capacity_reservation_id: String, + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value.

Valid range: Minimum value of 1. Maximum value of 1000.

+ pub max_results: Option, + ///

The token to retrieve the next page of results.
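A minimal pagination sketch for this new API, assuming an Ec2Client named `client`, rusoto's blocking `.sync()` helper, and an enclosing function that returns a Result:

let mut next_token: Option<String> = None;
loop {
    let page = client
        .get_capacity_reservation_usage(GetCapacityReservationUsageRequest {
            capacity_reservation_id: "cr-0123456789abcdef0".to_string(), // placeholder ID
            next_token: next_token.clone(),
            ..Default::default()
        })
        .sync()?;
    // ...consume page.instance_usages here...
    next_token = page.next_token;
    if next_token.is_none() {
        break;
    }
}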

+ pub next_token: Option, +} + +/// Serialize `GetCapacityReservationUsageRequest` contents to a `SignedRequest`. +struct GetCapacityReservationUsageRequestSerializer; +impl GetCapacityReservationUsageRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &GetCapacityReservationUsageRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put( + &format!("{}{}", prefix, "CapacityReservationId"), + &obj.capacity_reservation_id, + ); + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.max_results { + params.put(&format!("{}{}", prefix, "MaxResults"), &field_value); + } + if let Some(ref field_value) = obj.next_token { + params.put(&format!("{}{}", prefix, "NextToken"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct GetCapacityReservationUsageResult { + ///

The remaining capacity. Indicates the number of instances that can be launched in the Capacity Reservation.

+ pub available_instance_count: Option, + ///

The ID of the Capacity Reservation.

+ pub capacity_reservation_id: Option, + ///

The type of instance for which the Capacity Reservation reserves capacity.

+ pub instance_type: Option, + ///

Information about the Capacity Reservation usage.

+ pub instance_usages: Option>, + ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

+ pub next_token: Option, + ///

The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

  • active - The Capacity Reservation is active and the capacity is available for your use.

  • expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.

  • cancelled - The Capacity Reservation was manually cancelled. The reserved capacity is no longer available for your use.

  • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

  • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

+ pub state: Option, + ///

The number of instances for which the Capacity Reservation reserves capacity.
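Given a GetCapacityReservationUsageResult named `page`, the per-account usage entries can be totalled as below; the identity in the comment is a sketch that assumes the counts are mutually consistent:

let used: i64 = page
    .instance_usages
    .unwrap_or_default()
    .iter()
    .filter_map(|u| u.used_instance_count)
    .sum();
// used + page.available_instance_count.unwrap_or(0) == page.total_instance_count.unwrap_or(0)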

+ pub total_instance_count: Option, +} + +struct GetCapacityReservationUsageResultDeserializer; +impl GetCapacityReservationUsageResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, GetCapacityReservationUsageResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "availableInstanceCount" => { + obj.available_instance_count = Some(IntegerDeserializer::deserialize( + "availableInstanceCount", + stack, + )?); + } + "capacityReservationId" => { + obj.capacity_reservation_id = Some(StringDeserializer::deserialize( + "capacityReservationId", + stack, + )?); + } + "instanceType" => { + obj.instance_type = + Some(StringDeserializer::deserialize("instanceType", stack)?); + } + "instanceUsageSet" => { + obj.instance_usages.get_or_insert(vec![]).extend( + InstanceUsageSetDeserializer::deserialize("instanceUsageSet", stack)?, + ); + } + "nextToken" => { + obj.next_token = Some(StringDeserializer::deserialize("nextToken", stack)?); + } + "state" => { + obj.state = Some(CapacityReservationStateDeserializer::deserialize( + "state", stack, + )?); + } + "totalInstanceCount" => { + obj.total_instance_count = Some(IntegerDeserializer::deserialize( + "totalInstanceCount", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] pub struct GetConsoleOutputRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, @@ -23146,7 +24664,7 @@ impl GetEbsDefaultKmsKeyIdRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct GetEbsDefaultKmsKeyIdResult { - ///

The full ARN of the default CMK that your account uses to encrypt an EBS volume when no CMK is specified in the API call that creates the volume.

+ ///

The Amazon Resource Name (ARN) of the default CMK for encryption by default.

pub kms_key_id: Option, } @@ -23195,7 +24713,7 @@ impl GetEbsEncryptionByDefaultRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct GetEbsEncryptionByDefaultResult { - ///

Indicates whether default encryption for EBS volumes is enabled or disabled.

+ ///

Indicates whether encryption by default is enabled.

pub ebs_encryption_by_default: Option, } @@ -24569,6 +26087,84 @@ impl HypervisorTypeDeserializer { Ok(obj) } } +struct IKEVersionsListDeserializer; +impl IKEVersionsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(IKEVersionsListValueDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The internet key exchange (IKE) version permitted for the VPN tunnel.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct IKEVersionsListValue { + ///

The IKE version.

+ pub value: Option, +} + +struct IKEVersionsListValueDeserializer; +impl IKEVersionsListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, IKEVersionsListValue, _>(tag_name, stack, |name, stack, obj| { + match name { + "value" => { + obj.value = Some(StringDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} + +/// Serialize `IKEVersionsRequestList` contents to a `SignedRequest`. +struct IKEVersionsRequestListSerializer; +impl IKEVersionsRequestListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + IKEVersionsRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

The IKE version that is permitted for the VPN tunnel.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct IKEVersionsRequestListValue { + ///

The IKE version.
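A sketch of how the request-list serializer above numbers these values (the exact enclosing prefix depends on the parent request):

let ike_versions = vec![
    IKEVersionsRequestListValue {
        value: Some("ikev1".to_string()),
    },
    IKEVersionsRequestListValue {
        value: Some("ikev2".to_string()),
    },
];
// Each element gets a 1-based index, producing parameters along the lines of
//   ...IKEVersion.1.Value=ikev1
//   ...IKEVersion.2.Value=ikev2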

+ pub value: Option, +} + +/// Serialize `IKEVersionsRequestListValue` contents to a `SignedRequest`. +struct IKEVersionsRequestListValueSerializer; +impl IKEVersionsRequestListValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &IKEVersionsRequestListValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + ///

Describes an IAM instance profile.

#[derive(Default, Debug, Clone, PartialEq)] pub struct IamInstanceProfile { @@ -24871,7 +26467,7 @@ pub struct Image { pub name: Option, ///

The AWS account ID of the image owner.

pub owner_id: Option, - ///

The value is Windows for Windows AMIs; otherwise blank.

+ ///

This value is set to windows for Windows AMIs; otherwise, it is blank.

pub platform: Option, ///

Any product codes associated with the AMI.

pub product_codes: Option>, @@ -25263,10 +26859,9 @@ impl ImportClientVpnClientCertificateRevocationListResultDeserializer { ) } } -///

Contains the parameters for ImportImage.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportImageRequest { - ///

The architecture of the virtual machine.

Valid values: i386 | x86_64

+ ///

The architecture of the virtual machine.

Valid values: i386 | x86_64 | arm64

pub architecture: Option, ///

The client-specific data.

pub client_data: Option, @@ -25284,7 +26879,7 @@ pub struct ImportImageRequest { pub hypervisor: Option, ///

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID

  • Key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

  • ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the AMI is being copied to.

pub kms_key_id: Option, - ///

The license type to be used for the Amazon Machine Image (AMI) after importing.

Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see Prerequisites in the VM Import/Export User Guide.

Valid values include:

  • Auto - Detects the source-system operating system (OS) and applies the appropriate license.

  • AWS - Replaces the source-system license with an AWS license, if appropriate.

  • BYOL - Retains the source-system license, if appropriate.

Default value: Auto

+ ///

The license type to be used for the Amazon Machine Image (AMI) after importing.

By default, we detect the source-system operating system (OS) and apply the appropriate license. Specify AWS to replace the source-system license with an AWS license, if appropriate. Specify BYOL to retain the source-system license, if appropriate.

To use BYOL, you must have existing licenses with rights to use these licenses in a third party cloud, such as AWS. For more information, see Prerequisites in the VM Import/Export User Guide.

pub license_type: Option, ///

The operating system of the virtual machine.

Valid values: Windows | Linux

pub platform: Option, @@ -25348,7 +26943,6 @@ impl ImportImageRequestSerializer { } } -///

Contains the output for ImportImage.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportImageResult { ///

The architecture of the virtual machine.

@@ -25441,7 +27035,7 @@ impl ImportImageResultDeserializer { ///

Describes an import image task.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportImageTask { - ///

The architecture of the virtual machine.

Valid values: i386 | x86_64

+ ///

The architecture of the virtual machine.

Valid values: i386 | x86_64 | arm64

pub architecture: Option, ///

A description of the import task.

pub description: Option, @@ -25636,7 +27230,6 @@ impl ImportInstanceLaunchSpecificationSerializer { } } -///

Contains the parameters for ImportInstance.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportInstanceRequest { ///

A description for the instance being imported.

@@ -25684,7 +27277,6 @@ impl ImportInstanceRequestSerializer { } } -///

Contains the output for ImportInstance.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportInstanceResult { ///

Information about the conversion task.

@@ -25909,7 +27501,6 @@ impl ImportKeyPairResultDeserializer { }) } } -///

Contains the parameters for ImportSnapshot.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportSnapshotRequest { ///

The client-specific data.

@@ -25974,7 +27565,6 @@ impl ImportSnapshotRequestSerializer { } } -///

Contains the output for ImportSnapshot.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportSnapshotResult { ///

A description of the import snapshot task.

@@ -26081,7 +27671,6 @@ impl ImportTaskIdListSerializer { } } -///

Contains the parameters for ImportVolume.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportVolumeRequest { ///

The Availability Zone for the resulting EBS volume.

@@ -26120,7 +27709,6 @@ impl ImportVolumeRequestSerializer { } } -///

Contains the output for ImportVolume.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ImportVolumeResult { ///

Information about the conversion task.

@@ -27592,7 +29180,7 @@ pub struct InstanceNetworkInterfaceSpecification { pub private_ip_addresses: Option>, ///

The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a RunInstances request.

pub secondary_private_ip_address_count: Option, - ///

The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.

+ ///

The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.

pub subnet_id: Option, } @@ -28253,6 +29841,56 @@ impl InstanceTypeListSerializer { } } +///

Information about the Capacity Reservation usage.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct InstanceUsage { + ///

The ID of the AWS account that is making use of the Capacity Reservation.

+ pub account_id: Option, + ///

The number of instances the AWS account currently has in the Capacity Reservation.

+ pub used_instance_count: Option, +} + +struct InstanceUsageDeserializer; +impl InstanceUsageDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, InstanceUsage, _>(tag_name, stack, |name, stack, obj| { + match name { + "accountId" => { + obj.account_id = Some(StringDeserializer::deserialize("accountId", stack)?); + } + "usedInstanceCount" => { + obj.used_instance_count = Some(IntegerDeserializer::deserialize( + "usedInstanceCount", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +struct InstanceUsageSetDeserializer; +impl InstanceUsageSetDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(InstanceUsageDeserializer::deserialize("item", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} struct IntegerDeserializer; impl IntegerDeserializer { #[allow(unused_variables)] @@ -29596,7 +31234,7 @@ impl LaunchTemplateCpuOptionsDeserializer { pub struct LaunchTemplateCpuOptionsRequest { ///

The number of CPU cores for the instance.

pub core_count: Option, - ///

The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.

+ ///

The number of threads per CPU core. To disable multithreading for the instance, specify a value of 1. Otherwise, specify the default value of 2.

pub threads_per_core: Option, } @@ -31407,7 +33045,7 @@ impl ModifyCapacityReservationRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyCapacityReservationResult { - ///

Information about the Capacity Reservation.

+ ///

Returns true if the request succeeds; otherwise, it returns an error.

pub return_: Option, } @@ -31447,6 +33085,8 @@ pub struct ModifyClientVpnEndpointRequest { pub dry_run: Option, ///

The ARN of the server certificate to be used. The server certificate must be provisioned in AWS Certificate Manager (ACM).

pub server_certificate_arn: Option, + ///

Indicates whether the VPN is split-tunnel.

For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client VPN Endpoint in the AWS Client VPN Administrator Guide.

+ pub split_tunnel: Option, } /// Serialize `ModifyClientVpnEndpointRequest` contents to a `SignedRequest`. @@ -31488,6 +33128,9 @@ impl ModifyClientVpnEndpointRequestSerializer { &field_value, ); } + if let Some(ref field_value) = obj.split_tunnel { + params.put(&format!("{}{}", prefix, "SplitTunnel"), &field_value); + } } } @@ -31523,7 +33166,7 @@ impl ModifyClientVpnEndpointResultDeserializer { pub struct ModifyEbsDefaultKmsKeyIdRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, - ///

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use to encrypt the volume. This parameter is only required if you want to use a customer-managed CMK; if this parameter is not specified, your AWS-managed CMK for the account is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID: For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias: For example, alias/ExampleAlias.

  • Key ARN: The key ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN: The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. The action will eventually fail.

+ ///

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

pub kms_key_id: String, } @@ -31545,7 +33188,7 @@ impl ModifyEbsDefaultKmsKeyIdRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyEbsDefaultKmsKeyIdResult { - ///

The full ARN of the default CMK that your account uses to encrypt an EBS volume when no CMK is specified in the API call that creates the volume.

+ ///

The Amazon Resource Name (ARN) of the default CMK for encryption by default.

pub kms_key_id: Option, } @@ -32632,6 +34275,8 @@ impl ModifySnapshotAttributeRequestSerializer { pub struct ModifySpotFleetRequestRequest { ///

Indicates whether running Spot Instances should be terminated if the target capacity of the Spot Fleet request is decreased below the current size of the Spot Fleet.

pub excess_capacity_termination_policy: Option, + ///

The number of On-Demand Instances in the fleet.

+ pub on_demand_target_capacity: Option, ///

The ID of the Spot Fleet request.

pub spot_fleet_request_id: String, ///

The size of the fleet.

@@ -32653,6 +34298,12 @@ impl ModifySpotFleetRequestRequestSerializer { &field_value, ); } + if let Some(ref field_value) = obj.on_demand_target_capacity { + params.put( + &format!("{}{}", prefix, "OnDemandTargetCapacity"), + &field_value, + ); + } params.put( &format!("{}{}", prefix, "SpotFleetRequestId"), &obj.spot_fleet_request_id, @@ -32729,6 +34380,315 @@ impl ModifySubnetAttributeRequestSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorFilterNetworkServicesRequest { + ///

The network service, for example Amazon DNS, that you want to mirror.

+ pub add_network_services: Option>, + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The network service, for example Amazon DNS, that you no longer want to mirror.

+ pub remove_network_services: Option>, + ///

The ID of the Traffic Mirror filter.

+ pub traffic_mirror_filter_id: String, +} + +/// Serialize `ModifyTrafficMirrorFilterNetworkServicesRequest` contents to a `SignedRequest`. +struct ModifyTrafficMirrorFilterNetworkServicesRequestSerializer; +impl ModifyTrafficMirrorFilterNetworkServicesRequestSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &ModifyTrafficMirrorFilterNetworkServicesRequest, + ) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.add_network_services { + TrafficMirrorNetworkServiceListSerializer::serialize( + params, + &format!("{}{}", prefix, "AddNetworkService"), + field_value, + ); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.remove_network_services { + TrafficMirrorNetworkServiceListSerializer::serialize( + params, + &format!("{}{}", prefix, "RemoveNetworkService"), + field_value, + ); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterId"), + &obj.traffic_mirror_filter_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorFilterNetworkServicesResult { + ///

The Traffic Mirror filter that the network service is associated with.

+ pub traffic_mirror_filter: Option, +} + +struct ModifyTrafficMirrorFilterNetworkServicesResultDeserializer; +impl ModifyTrafficMirrorFilterNetworkServicesResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ModifyTrafficMirrorFilterNetworkServicesResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorFilter" => { + obj.traffic_mirror_filter = + Some(TrafficMirrorFilterDeserializer::deserialize( + "trafficMirrorFilter", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorFilterRuleRequest { + ///

The description to assign to the Traffic Mirror rule.

+ pub description: Option, + ///

The destination CIDR block to assign to the Traffic Mirror rule.

+ pub destination_cidr_block: Option, + ///

The destination ports that are associated with the Traffic Mirror rule.

+ pub destination_port_range: Option, + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The protocol, for example TCP, to assign to the Traffic Mirror rule.

+ pub protocol: Option, + ///

The properties that you want to remove from the Traffic Mirror filter rule.

When you remove a property from a Traffic Mirror filter rule, the property is set to the default.
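A hedged sketch of resetting a property this way (placeholder rule ID; the accepted field-name strings are an assumption here, not confirmed by this diff):

let request = ModifyTrafficMirrorFilterRuleRequest {
    traffic_mirror_filter_rule_id: "tmfr-0123456789abcdef0".to_string(),
    remove_fields: Some(vec!["description".to_string()]), // assumed field name
    ..Default::default()
};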

+ pub remove_fields: Option>, + ///

The action to assign to the rule.

+ pub rule_action: Option, + ///

The number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number.

+ pub rule_number: Option, + ///

The source CIDR block to assign to the Traffic Mirror rule.

+ pub source_cidr_block: Option, + ///

The port range to assign to the Traffic Mirror rule.

+ pub source_port_range: Option, + ///

The type of traffic (ingress | egress) to assign to the rule.

+ pub traffic_direction: Option, + ///

The ID of the Traffic Mirror rule.

+ pub traffic_mirror_filter_rule_id: String, +} + +/// Serialize `ModifyTrafficMirrorFilterRuleRequest` contents to a `SignedRequest`. +struct ModifyTrafficMirrorFilterRuleRequestSerializer; +impl ModifyTrafficMirrorFilterRuleRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ModifyTrafficMirrorFilterRuleRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + if let Some(ref field_value) = obj.destination_cidr_block { + params.put( + &format!("{}{}", prefix, "DestinationCidrBlock"), + &field_value, + ); + } + if let Some(ref field_value) = obj.destination_port_range { + TrafficMirrorPortRangeRequestSerializer::serialize( + params, + &format!("{}{}", prefix, "DestinationPortRange"), + field_value, + ); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.protocol { + params.put(&format!("{}{}", prefix, "Protocol"), &field_value); + } + if let Some(ref field_value) = obj.remove_fields { + TrafficMirrorFilterRuleFieldListSerializer::serialize( + params, + &format!("{}{}", prefix, "RemoveField"), + field_value, + ); + } + if let Some(ref field_value) = obj.rule_action { + params.put(&format!("{}{}", prefix, "RuleAction"), &field_value); + } + if let Some(ref field_value) = obj.rule_number { + params.put(&format!("{}{}", prefix, "RuleNumber"), &field_value); + } + if let Some(ref field_value) = obj.source_cidr_block { + params.put(&format!("{}{}", prefix, "SourceCidrBlock"), &field_value); + } + if let Some(ref field_value) = obj.source_port_range { + TrafficMirrorPortRangeRequestSerializer::serialize( + params, + &format!("{}{}", prefix, "SourcePortRange"), + field_value, + ); + } + if let Some(ref field_value) = obj.traffic_direction { + params.put(&format!("{}{}", prefix, "TrafficDirection"), &field_value); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterRuleId"), + &obj.traffic_mirror_filter_rule_id, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorFilterRuleResult { + ///

The modified Traffic Mirror rule.

+ pub traffic_mirror_filter_rule: Option, +} + +struct ModifyTrafficMirrorFilterRuleResultDeserializer; +impl ModifyTrafficMirrorFilterRuleResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ModifyTrafficMirrorFilterRuleResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorFilterRule" => { + obj.traffic_mirror_filter_rule = + Some(TrafficMirrorFilterRuleDeserializer::deserialize( + "trafficMirrorFilterRule", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorSessionRequest { + ///

The description to assign to the Traffic Mirror session.

+ pub description: Option, + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The number of bytes in each packet to mirror. These are bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.

+ pub packet_length: Option, + ///

The properties that you want to remove from the Traffic Mirror session.

When you remove a property from a Traffic Mirror session, the property is set to the default.

+ pub remove_fields: Option>, + ///

The session number determines the order in which sessions are evaluated when an interface is used by multiple sessions. The first session with a matching filter is the one that mirrors the packets.

Valid values are 1-32766.
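A one-line range check mirroring the documented bounds (hypothetical helper name):

fn valid_session_number(n: i64) -> bool {
    n >= 1 && n <= 32766
}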

+ pub session_number: Option, + ///

The ID of the Traffic Mirror filter.

+ pub traffic_mirror_filter_id: Option, + ///

The ID of the Traffic Mirror session.

+ pub traffic_mirror_session_id: String, + ///

The Traffic Mirror target. The target must be in the same VPC as the source, or have a VPC peering connection with the source.

+ pub traffic_mirror_target_id: Option, + ///

The virtual network ID of the Traffic Mirror session.

+ pub virtual_network_id: Option, +} + +/// Serialize `ModifyTrafficMirrorSessionRequest` contents to a `SignedRequest`. +struct ModifyTrafficMirrorSessionRequestSerializer; +impl ModifyTrafficMirrorSessionRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ModifyTrafficMirrorSessionRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.description { + params.put(&format!("{}{}", prefix, "Description"), &field_value); + } + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + if let Some(ref field_value) = obj.packet_length { + params.put(&format!("{}{}", prefix, "PacketLength"), &field_value); + } + if let Some(ref field_value) = obj.remove_fields { + TrafficMirrorSessionFieldListSerializer::serialize( + params, + &format!("{}{}", prefix, "RemoveField"), + field_value, + ); + } + if let Some(ref field_value) = obj.session_number { + params.put(&format!("{}{}", prefix, "SessionNumber"), &field_value); + } + if let Some(ref field_value) = obj.traffic_mirror_filter_id { + params.put( + &format!("{}{}", prefix, "TrafficMirrorFilterId"), + &field_value, + ); + } + params.put( + &format!("{}{}", prefix, "TrafficMirrorSessionId"), + &obj.traffic_mirror_session_id, + ); + if let Some(ref field_value) = obj.traffic_mirror_target_id { + params.put( + &format!("{}{}", prefix, "TrafficMirrorTargetId"), + &field_value, + ); + } + if let Some(ref field_value) = obj.virtual_network_id { + params.put(&format!("{}{}", prefix, "VirtualNetworkId"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyTrafficMirrorSessionResult { + ///

Information about the Traffic Mirror session.

+ pub traffic_mirror_session: Option, +} + +struct ModifyTrafficMirrorSessionResultDeserializer; +impl ModifyTrafficMirrorSessionResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ModifyTrafficMirrorSessionResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "trafficMirrorSession" => { + obj.traffic_mirror_session = + Some(TrafficMirrorSessionDeserializer::deserialize( + "trafficMirrorSession", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyTransitGatewayVpcAttachmentRequest { ///

The IDs of one or more subnets to add. You can specify at most one subnet per Availability Zone.

@@ -33070,7 +35030,7 @@ pub struct ModifyVpcEndpointRequest { pub add_subnet_ids: Option>, ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, - ///

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

+ ///

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format.

pub policy_document: Option, ///

(Interface endpoint) Indicate whether a private hosted zone is associated with the VPC.

pub private_dns_enabled: Option, @@ -33479,6 +35439,8 @@ impl ModifyVpcTenancyResultDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyVpnConnectionRequest { + ///

The ID of the customer gateway at your end of the VPN connection.

+ pub customer_gateway_id: Option, ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, ///

The ID of the transit gateway.

@@ -33498,6 +35460,9 @@ impl ModifyVpnConnectionRequestSerializer { prefix.push_str("."); } + if let Some(ref field_value) = obj.customer_gateway_id { + params.put(&format!("{}{}", prefix, "CustomerGatewayId"), &field_value); + } if let Some(ref field_value) = obj.dry_run { params.put(&format!("{}{}", prefix, "DryRun"), &field_value); } @@ -33544,6 +35509,271 @@ impl ModifyVpnConnectionResultDeserializer { ) } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyVpnTunnelCertificateRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The ID of the AWS Site-to-Site VPN connection.

+ pub vpn_connection_id: String, + ///

The external IP address of the VPN tunnel.

+ pub vpn_tunnel_outside_ip_address: String, +} + +/// Serialize `ModifyVpnTunnelCertificateRequest` contents to a `SignedRequest`. +struct ModifyVpnTunnelCertificateRequestSerializer; +impl ModifyVpnTunnelCertificateRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ModifyVpnTunnelCertificateRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put( + &format!("{}{}", prefix, "VpnConnectionId"), + &obj.vpn_connection_id, + ); + params.put( + &format!("{}{}", prefix, "VpnTunnelOutsideIpAddress"), + &obj.vpn_tunnel_outside_ip_address, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyVpnTunnelCertificateResult { + pub vpn_connection: Option, +} + +struct ModifyVpnTunnelCertificateResultDeserializer; +impl ModifyVpnTunnelCertificateResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ModifyVpnTunnelCertificateResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "vpnConnection" => { + obj.vpn_connection = Some(VpnConnectionDeserializer::deserialize( + "vpnConnection", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyVpnTunnelOptionsRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The tunnel options to modify.

+ pub tunnel_options: ModifyVpnTunnelOptionsSpecification, + ///

The ID of the AWS Site-to-Site VPN connection.

+ pub vpn_connection_id: String, + ///

The external IP address of the VPN tunnel.

+ pub vpn_tunnel_outside_ip_address: String, +} + +/// Serialize `ModifyVpnTunnelOptionsRequest` contents to a `SignedRequest`. +struct ModifyVpnTunnelOptionsRequestSerializer; +impl ModifyVpnTunnelOptionsRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ModifyVpnTunnelOptionsRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + ModifyVpnTunnelOptionsSpecificationSerializer::serialize( + params, + &format!("{}{}", prefix, "TunnelOptions"), + &obj.tunnel_options, + ); + params.put( + &format!("{}{}", prefix, "VpnConnectionId"), + &obj.vpn_connection_id, + ); + params.put( + &format!("{}{}", prefix, "VpnTunnelOutsideIpAddress"), + &obj.vpn_tunnel_outside_ip_address, + ); + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyVpnTunnelOptionsResult { + pub vpn_connection: Option, +} + +struct ModifyVpnTunnelOptionsResultDeserializer; +impl ModifyVpnTunnelOptionsResultDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, ModifyVpnTunnelOptionsResult, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "vpnConnection" => { + obj.vpn_connection = Some(VpnConnectionDeserializer::deserialize( + "vpnConnection", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +///

The AWS Site-to-Site VPN tunnel options to modify.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct ModifyVpnTunnelOptionsSpecification { + ///

The number of seconds after which a DPD timeout occurs.

Constraints: A value between 0 and 30.

Default: 30

+ pub dpd_timeout_seconds: Option, + ///

The IKE versions that are permitted for the VPN tunnel.

Valid values: ikev1 | ikev2

+ pub ike_versions: Option>, + ///

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

+ pub phase_1dh_group_numbers: Option>, + ///

One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: AES128 | AES256

+ pub phase_1_encryption_algorithms: Option>, + ///

One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: SHA1 | SHA2-256

+ pub phase_1_integrity_algorithms: Option>, + ///

The lifetime for phase 1 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 28,800.

Default: 28800

+ pub phase_1_lifetime_seconds: Option, + ///

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

+ pub phase_2dh_group_numbers: Option>, + ///

One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: AES128 | AES256

+ pub phase_2_encryption_algorithms: Option>, + ///

One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: SHA1 | SHA2-256

+ pub phase_2_integrity_algorithms: Option>, + ///

The lifetime for phase 2 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 3,600. The value must be less than the value for Phase1LifetimeSeconds.

Default: 3600
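A sketch combining both documented constraints (hypothetical helper name):

fn valid_phase2_lifetime(phase1_lifetime: i64, phase2_lifetime: i64) -> bool {
    phase2_lifetime >= 900 && phase2_lifetime <= 3600 && phase2_lifetime < phase1_lifetime
}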

+ pub phase_2_lifetime_seconds: Option, + ///

The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).
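A sketch of the documented PSK rules (hypothetical helper name):

fn valid_pre_shared_key(psk: &str) -> bool {
    psk.len() >= 8
        && psk.len() <= 64
        && !psk.starts_with('0')
        && psk.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '_')
}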

+ pub pre_shared_key: Option, + ///

The percentage of the rekey window (determined by RekeyMarginTimeSeconds) during which the rekey time is randomly selected.

Constraints: A value between 0 and 100.

Default: 100

+ pub rekey_fuzz_percentage: Option, + ///

The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 540
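The margin constraint depends on the phase 2 lifetime; a minimal sketch (hypothetical helper name):

fn valid_rekey_margin(margin_seconds: i64, phase2_lifetime_seconds: i64) -> bool {
    margin_seconds >= 60 && margin_seconds <= phase2_lifetime_seconds / 2
}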

+ pub rekey_margin_time_seconds: Option, + ///

The number of packets in an IKE replay window.

Constraints: A value between 64 and 2048.

Default: 1024

+ pub replay_window_size: Option, + ///

The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway.

Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following CIDR blocks are reserved and cannot be used:

  • 169.254.0.0/30

  • 169.254.1.0/30

  • 169.254.2.0/30

  • 169.254.3.0/30

  • 169.254.4.0/30

  • 169.254.5.0/30

  • 169.254.169.252/30
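A rough sketch of the CIDR rules above; it checks the /30 suffix and the 169.254. prefix textually rather than doing real CIDR arithmetic:

const RESERVED_TUNNEL_CIDRS: [&str; 7] = [
    "169.254.0.0/30",
    "169.254.1.0/30",
    "169.254.2.0/30",
    "169.254.3.0/30",
    "169.254.4.0/30",
    "169.254.5.0/30",
    "169.254.169.252/30",
];

fn tunnel_inside_cidr_allowed(cidr: &str) -> bool {
    cidr.starts_with("169.254.") && cidr.ends_with("/30") && !RESERVED_TUNNEL_CIDRS.contains(&cidr)
}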

+ pub tunnel_inside_cidr: Option, +} + +/// Serialize `ModifyVpnTunnelOptionsSpecification` contents to a `SignedRequest`. +struct ModifyVpnTunnelOptionsSpecificationSerializer; +impl ModifyVpnTunnelOptionsSpecificationSerializer { + fn serialize(params: &mut Params, name: &str, obj: &ModifyVpnTunnelOptionsSpecification) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dpd_timeout_seconds { + params.put(&format!("{}{}", prefix, "DPDTimeoutSeconds"), &field_value); + } + if let Some(ref field_value) = obj.ike_versions { + IKEVersionsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "IKEVersion"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1dh_group_numbers { + Phase1DHGroupNumbersRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1DHGroupNumber"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_encryption_algorithms { + Phase1EncryptionAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1EncryptionAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_integrity_algorithms { + Phase1IntegrityAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1IntegrityAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_lifetime_seconds { + params.put( + &format!("{}{}", prefix, "Phase1LifetimeSeconds"), + &field_value, + ); + } + if let Some(ref field_value) = obj.phase_2dh_group_numbers { + Phase2DHGroupNumbersRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2DHGroupNumber"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_encryption_algorithms { + Phase2EncryptionAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2EncryptionAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_integrity_algorithms { + Phase2IntegrityAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2IntegrityAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_lifetime_seconds { + params.put( + &format!("{}{}", prefix, "Phase2LifetimeSeconds"), + &field_value, + ); + } + if let Some(ref field_value) = obj.pre_shared_key { + params.put(&format!("{}{}", prefix, "PreSharedKey"), &field_value); + } + if let Some(ref field_value) = obj.rekey_fuzz_percentage { + params.put( + &format!("{}{}", prefix, "RekeyFuzzPercentage"), + &field_value, + ); + } + if let Some(ref field_value) = obj.rekey_margin_time_seconds { + params.put( + &format!("{}{}", prefix, "RekeyMarginTimeSeconds"), + &field_value, + ); + } + if let Some(ref field_value) = obj.replay_window_size { + params.put(&format!("{}{}", prefix, "ReplayWindowSize"), &field_value); + } + if let Some(ref field_value) = obj.tunnel_inside_cidr { + params.put(&format!("{}{}", prefix, "TunnelInsideCidr"), &field_value); + } + } +} + #[derive(Default, Debug, Clone, PartialEq)] pub struct MonitorInstancesRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

@@ -33762,7 +35992,7 @@ pub struct NatGateway { pub nat_gateway_addresses: Option>, ///

The ID of the NAT gateway.

     pub nat_gateway_id: Option<String>,
     /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.
pub provisioned_bandwidth: Option, ///

The state of the NAT gateway.

  • pending: The NAT gateway is being created and is not ready to process traffic.

  • failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason.

  • available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway.

  • deleting: The NAT gateway is in the process of being terminated and may still be processing traffic.

  • deleted: The NAT gateway has been terminated and is no longer processing traffic.

pub state: Option, @@ -34879,11 +37109,13 @@ impl OnDemandAllocationStrategyDeserializer { Ok(obj) } } -///

The allocation strategy of On-Demand Instances in an EC2 Fleet.

+///

Describes the configuration of On-Demand Instances in an EC2 Fleet.

#[derive(Default, Debug, Clone, PartialEq)] pub struct OnDemandOptions { ///

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

pub allocation_strategy: Option, + ///

The maximum amount per hour for On-Demand Instances that you're willing to pay.

+ pub max_total_price: Option, ///

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

pub min_target_capacity: Option, ///

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

@@ -34908,6 +37140,10 @@ impl OnDemandOptionsDeserializer { stack, )?); } + "maxTotalPrice" => { + obj.max_total_price = + Some(StringDeserializer::deserialize("maxTotalPrice", stack)?); + } "minTargetCapacity" => { obj.min_target_capacity = Some(IntegerDeserializer::deserialize( "minTargetCapacity", @@ -34932,11 +37168,13 @@ impl OnDemandOptionsDeserializer { }) } } -///

The allocation strategy of On-Demand Instances in an EC2 Fleet.

+///

Describes the configuration of On-Demand Instances in an EC2 Fleet.

#[derive(Default, Debug, Clone, PartialEq)] pub struct OnDemandOptionsRequest { ///

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

pub allocation_strategy: Option, + ///

The maximum amount per hour for On-Demand Instances that you're willing to pay.

+ pub max_total_price: Option, ///

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

pub min_target_capacity: Option, ///

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

@@ -34957,6 +37195,9 @@ impl OnDemandOptionsRequestSerializer { if let Some(ref field_value) = obj.allocation_strategy { params.put(&format!("{}{}", prefix, "AllocationStrategy"), &field_value); } + if let Some(ref field_value) = obj.max_total_price { + params.put(&format!("{}{}", prefix, "MaxTotalPrice"), &field_value); + } if let Some(ref field_value) = obj.min_target_capacity { params.put(&format!("{}{}", prefix, "MinTargetCapacity"), &field_value); } @@ -35135,6 +37376,522 @@ impl PermissionGroupDeserializer { Ok(obj) } } +struct Phase1DHGroupNumbersListDeserializer; +impl Phase1DHGroupNumbersListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(Phase1DHGroupNumbersListValueDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The Diffie-Hellman group number for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1DHGroupNumbersListValue { + ///

The Diffie-Hellman group number.

+ pub value: Option, +} + +struct Phase1DHGroupNumbersListValueDeserializer; +impl Phase1DHGroupNumbersListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase1DHGroupNumbersListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(IntegerDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase1DHGroupNumbersRequestList` contents to a `SignedRequest`. +struct Phase1DHGroupNumbersRequestListSerializer; +impl Phase1DHGroupNumbersRequestListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase1DHGroupNumbersRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies a Diffie-Hellman group number for the VPN tunnel for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1DHGroupNumbersRequestListValue { + ///

The Diffie-Hellman group number.
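All of these request-list serializers share one wire convention: each element of the list gets a one-based `Name.N` prefix, and the element's fields are appended after another dot. A stand-alone sketch of the resulting query keys, using a plain `BTreeMap` as a stand-in for rusoto's `Params` and illustrative group numbers:

```rust
use std::collections::BTreeMap;

fn main() {
    // Stand-in for rusoto's Params: ordered query-string key/value pairs.
    let mut params: BTreeMap<String, String> = BTreeMap::new();
    let group_numbers = vec![2, 14, 24];
    // Mirrors Phase1DHGroupNumbersRequestListSerializer: one-based "Name.N"
    // prefixes, with the member field name appended after a dot.
    for (index, value) in group_numbers.iter().enumerate() {
        params.insert(
            format!("Phase1DHGroupNumber.{}.Value", index + 1),
            value.to_string(),
        );
    }
    for (key, value) in &params {
        println!("{}={}", key, value);
    }
}
```

So three group numbers become `Phase1DHGroupNumber.1.Value`, `Phase1DHGroupNumber.2.Value`, and `Phase1DHGroupNumber.3.Value` on the query string.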

+ pub value: Option, +} + +/// Serialize `Phase1DHGroupNumbersRequestListValue` contents to a `SignedRequest`. +struct Phase1DHGroupNumbersRequestListValueSerializer; +impl Phase1DHGroupNumbersRequestListValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Phase1DHGroupNumbersRequestListValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + +struct Phase1EncryptionAlgorithmsListDeserializer; +impl Phase1EncryptionAlgorithmsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push( + Phase1EncryptionAlgorithmsListValueDeserializer::deserialize("item", stack)?, + ); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The encryption algorithm for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1EncryptionAlgorithmsListValue { + ///

The value for the encryption algorithm.

+ pub value: Option, +} + +struct Phase1EncryptionAlgorithmsListValueDeserializer; +impl Phase1EncryptionAlgorithmsListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase1EncryptionAlgorithmsListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(StringDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase1EncryptionAlgorithmsRequestList` contents to a `SignedRequest`. +struct Phase1EncryptionAlgorithmsRequestListSerializer; +impl Phase1EncryptionAlgorithmsRequestListSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Vec, + ) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase1EncryptionAlgorithmsRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies the encryption algorithm for the VPN tunnel for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1EncryptionAlgorithmsRequestListValue { + ///

The value for the encryption algorithm.

+ pub value: Option, +} + +/// Serialize `Phase1EncryptionAlgorithmsRequestListValue` contents to a `SignedRequest`. +struct Phase1EncryptionAlgorithmsRequestListValueSerializer; +impl Phase1EncryptionAlgorithmsRequestListValueSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Phase1EncryptionAlgorithmsRequestListValue, + ) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + +struct Phase1IntegrityAlgorithmsListDeserializer; +impl Phase1IntegrityAlgorithmsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(Phase1IntegrityAlgorithmsListValueDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The integrity algorithm for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1IntegrityAlgorithmsListValue { + ///

The value for the integrity algorithm.

+ pub value: Option, +} + +struct Phase1IntegrityAlgorithmsListValueDeserializer; +impl Phase1IntegrityAlgorithmsListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase1IntegrityAlgorithmsListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(StringDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase1IntegrityAlgorithmsRequestList` contents to a `SignedRequest`. +struct Phase1IntegrityAlgorithmsRequestListSerializer; +impl Phase1IntegrityAlgorithmsRequestListSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Vec, + ) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase1IntegrityAlgorithmsRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies the integrity algorithm for the VPN tunnel for phase 1 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase1IntegrityAlgorithmsRequestListValue { + ///

The value for the integrity algorithm.

+ pub value: Option, +} + +/// Serialize `Phase1IntegrityAlgorithmsRequestListValue` contents to a `SignedRequest`. +struct Phase1IntegrityAlgorithmsRequestListValueSerializer; +impl Phase1IntegrityAlgorithmsRequestListValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Phase1IntegrityAlgorithmsRequestListValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + +struct Phase2DHGroupNumbersListDeserializer; +impl Phase2DHGroupNumbersListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(Phase2DHGroupNumbersListValueDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The Diffie-Hellman group number for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2DHGroupNumbersListValue { + ///

The Diffie-Hellman group number.

+ pub value: Option, +} + +struct Phase2DHGroupNumbersListValueDeserializer; +impl Phase2DHGroupNumbersListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase2DHGroupNumbersListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(IntegerDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase2DHGroupNumbersRequestList` contents to a `SignedRequest`. +struct Phase2DHGroupNumbersRequestListSerializer; +impl Phase2DHGroupNumbersRequestListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase2DHGroupNumbersRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies a Diffie-Hellman group number for the VPN tunnel for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2DHGroupNumbersRequestListValue { + ///

The Diffie-Hellman group number.

+ pub value: Option, +} + +/// Serialize `Phase2DHGroupNumbersRequestListValue` contents to a `SignedRequest`. +struct Phase2DHGroupNumbersRequestListValueSerializer; +impl Phase2DHGroupNumbersRequestListValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Phase2DHGroupNumbersRequestListValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + +struct Phase2EncryptionAlgorithmsListDeserializer; +impl Phase2EncryptionAlgorithmsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push( + Phase2EncryptionAlgorithmsListValueDeserializer::deserialize("item", stack)?, + ); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The encryption algorithm for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2EncryptionAlgorithmsListValue { + ///

The encryption algorithm.

+ pub value: Option, +} + +struct Phase2EncryptionAlgorithmsListValueDeserializer; +impl Phase2EncryptionAlgorithmsListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase2EncryptionAlgorithmsListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(StringDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase2EncryptionAlgorithmsRequestList` contents to a `SignedRequest`. +struct Phase2EncryptionAlgorithmsRequestListSerializer; +impl Phase2EncryptionAlgorithmsRequestListSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Vec, + ) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase2EncryptionAlgorithmsRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies the encryption algorithm for the VPN tunnel for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2EncryptionAlgorithmsRequestListValue { + ///

The encryption algorithm.

+ pub value: Option, +} + +/// Serialize `Phase2EncryptionAlgorithmsRequestListValue` contents to a `SignedRequest`. +struct Phase2EncryptionAlgorithmsRequestListValueSerializer; +impl Phase2EncryptionAlgorithmsRequestListValueSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Phase2EncryptionAlgorithmsRequestListValue, + ) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + +struct Phase2IntegrityAlgorithmsListDeserializer; +impl Phase2IntegrityAlgorithmsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(Phase2IntegrityAlgorithmsListValueDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///

The integrity algorithm for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2IntegrityAlgorithmsListValue { + ///

The integrity algorithm.

+ pub value: Option, +} + +struct Phase2IntegrityAlgorithmsListValueDeserializer; +impl Phase2IntegrityAlgorithmsListValueDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, Phase2IntegrityAlgorithmsListValue, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "value" => { + obj.value = Some(StringDeserializer::deserialize("value", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `Phase2IntegrityAlgorithmsRequestList` contents to a `SignedRequest`. +struct Phase2IntegrityAlgorithmsRequestListSerializer; +impl Phase2IntegrityAlgorithmsRequestListSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &Vec, + ) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + Phase2IntegrityAlgorithmsRequestListValueSerializer::serialize(params, &key, obj); + } + } +} + +///

Specifies the integrity algorithm for the VPN tunnel for phase 2 IKE negotiations.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct Phase2IntegrityAlgorithmsRequestListValue { + ///

The integrity algorithm.
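Taken together, these phase 1 and phase 2 request-list values are what `ModifyVpnTunnelOptionsSpecification` carries. A hedged construction sketch, using the field names generated above; the algorithm names and timeout are illustrative choices, not defaults:

```rust
use rusoto_ec2::{
    ModifyVpnTunnelOptionsSpecification, Phase1DHGroupNumbersRequestListValue,
    Phase1EncryptionAlgorithmsRequestListValue, Phase2IntegrityAlgorithmsRequestListValue,
};

fn tunnel_options() -> ModifyVpnTunnelOptionsSpecification {
    ModifyVpnTunnelOptionsSpecification {
        // Restrict phase 1 IKE negotiations to DH group 14 with AES256.
        phase_1dh_group_numbers: Some(vec![Phase1DHGroupNumbersRequestListValue {
            value: Some(14),
        }]),
        phase_1_encryption_algorithms: Some(vec![Phase1EncryptionAlgorithmsRequestListValue {
            value: Some("AES256".to_owned()),
        }]),
        // Restrict phase 2 integrity checking to SHA2-256.
        phase_2_integrity_algorithms: Some(vec![Phase2IntegrityAlgorithmsRequestListValue {
            value: Some("SHA2-256".to_owned()),
        }]),
        dpd_timeout_seconds: Some(30),
        // Anything left unset keeps the tunnel's current value.
        ..Default::default()
    }
}
```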

+ pub value: Option, +} + +/// Serialize `Phase2IntegrityAlgorithmsRequestListValue` contents to a `SignedRequest`. +struct Phase2IntegrityAlgorithmsRequestListValueSerializer; +impl Phase2IntegrityAlgorithmsRequestListValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Phase2IntegrityAlgorithmsRequestListValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.value { + params.put(&format!("{}{}", prefix, "Value"), &field_value); + } + } +} + ///

Describes the placement of an instance.

#[derive(Default, Debug, Clone, PartialEq)] pub struct Placement { @@ -36075,18 +38832,18 @@ impl ProvisionByoipCidrResultDeserializer { ) } } -///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ProvisionedBandwidth { - ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+ ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

pub provision_time: Option, - ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+ ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

pub provisioned: Option, - ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+ ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

pub request_time: Option, - ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+ ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

pub requested: Option, - ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

+ ///

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

pub status: Option, } @@ -36771,6 +39528,8 @@ impl RecurringChargesListDeserializer { pub struct Region { ///

The Region service endpoint.

pub endpoint: Option, + ///

The Region opt-in status. The possible values are opt-in-not-required, opted-in, and not-opted-in.
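The new `opt_in_status` field surfaces through `DescribeRegions` like any other deserialized member. A minimal sketch of reading it, assuming the generated client and result shapes in this crate:

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeRegionsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let result = client
        .describe_regions(DescribeRegionsRequest::default())
        .sync()
        .expect("DescribeRegions failed");
    // Each entry carries the endpoint, name, and the new opt-in status.
    for region in result.regions.unwrap_or_default() {
        println!(
            "{:?} opt-in status: {:?}",
            region.region_name, region.opt_in_status
        );
    }
}
```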

+ pub opt_in_status: Option, ///

The name of the Region.

pub region_name: Option, } @@ -36784,6 +39543,10 @@ impl RegionDeserializer { "regionEndpoint" => { obj.endpoint = Some(StringDeserializer::deserialize("regionEndpoint", stack)?); } + "optInStatus" => { + obj.opt_in_status = + Some(StringDeserializer::deserialize("optInStatus", stack)?); + } "regionName" => { obj.region_name = Some(StringDeserializer::deserialize("regionName", stack)?); } @@ -37728,7 +40491,7 @@ pub struct RequestLaunchTemplateData { pub license_specifications: Option>, ///

The monitoring for the instance.

pub monitoring: Option, - ///

One or more network interfaces. If you specify a network interface, you must specify any security groups as part of the network interface.

+ ///

One or more network interfaces. If you specify a network interface, you must specify any security groups and subnets as part of the network interface.

pub network_interfaces: Option>, ///

The placement for the instance.

pub placement: Option, @@ -38117,7 +40880,7 @@ pub struct RequestSpotLaunchSpecification { pub security_group_ids: Option>, ///

One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

pub security_groups: Option>, - ///

The ID of the subnet in which to launch the instance.

+ ///

The IDs of the subnets in which to launch the instance. To specify multiple subnets, separate them using commas; for example, "subnet-1234abcdeexample1, subnet-0987cdef6example2".

pub subnet_id: Option, ///

The Base64-encoded user data for the instance. User data is limited to 16 KB.

pub user_data: Option, @@ -39218,7 +41981,7 @@ impl ResetEbsDefaultKmsKeyIdRequestSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct ResetEbsDefaultKmsKeyIdResult { - ///

The full ARN of the default CMK that your account uses to encrypt an EBS volume when no CMK is specified in the API call that creates the volume.

+ ///

The Amazon Resource Name (ARN) of the default CMK for EBS encryption by default.

pub kms_key_id: Option, } @@ -40419,7 +43182,7 @@ pub struct RunInstancesRequest { pub min_count: i64, ///

Specifies whether detailed monitoring is enabled for the instance.

pub monitoring: Option, - ///

The network interfaces to associate with the instance. If you specify a network interface, you must specify any security groups as part of the network interface.

+ ///

The network interfaces to associate with the instance. If you specify a network interface, you must specify any security groups and subnets as part of the network interface.

pub network_interfaces: Option>, ///

The placement for the instance.

pub placement: Option, @@ -40431,7 +43194,7 @@ pub struct RunInstancesRequest { pub security_group_ids: Option>, ///

[EC2-Classic, default VPC] The names of the security groups. For a nondefault VPC, you must use security group IDs instead.

If you specify a network interface, you must specify any security groups as part of the network interface.

Default: Amazon EC2 uses the default security group.

pub security_groups: Option>, - ///

[EC2-VPC] The ID of the subnet to launch the instance into.

You cannot specify this option and the network interfaces option in the same request.

+ ///

[EC2-VPC] The ID of the subnet to launch the instance into.

If you specify a network interface, you must specify any subnets as part of the network interface.

pub subnet_id: Option, ///

The tags to apply to the resources during launch. You can only tag instances and volumes on launch. The specified tags are applied to all instances or volumes that are created during launch. To tag a resource after it has been created, see CreateTags.

pub tag_specifications: Option>, @@ -41240,7 +44003,7 @@ pub struct ScheduledInstancesEbs { pub snapshot_id: Option, ///

The size of the volume, in GiB.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

pub volume_size: Option, - ///

The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic.

Default: standard

+ ///

The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic.

Default: gp2

pub volume_type: Option, } @@ -41657,7 +44420,7 @@ impl ScopeDeserializer { pub struct SearchTransitGatewayRoutesRequest { ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

pub dry_run: Option, - ///

One or more filters. The possible values are:

  • attachment.transit-gateway-attachment-id- The id of the transit gateway attachment.

  • attachment.resource-id - The resource id of the transit gateway attachment.

  • attachment.resource-type - The attachment resource type (vpc | vpn).

  • route-search.exact-match - The exact match of the specified filter.

  • route-search.longest-prefix-match - The longest prefix that matches the route.

  • route-search.subnet-of-match - The routes with a subnet that match the specified CIDR filter.

  • route-search.supernet-of-match - The routes with a CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29.

  • state - The state of the attachment (available | deleted | deleting | failed | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • type - The type of roue (active | blackhole).

+ ///

One or more filters. The possible values are:

  • attachment.transit-gateway-attachment-id - The ID of the transit gateway attachment.

  • attachment.resource-id - The resource ID of the transit gateway attachment.

  • attachment.resource-type - The attachment resource type (vpc | vpn).

  • route-search.exact-match - The exact match of the specified filter.

  • route-search.longest-prefix-match - The longest prefix that matches the route.

  • route-search.subnet-of-match - The routes with a subnet that match the specified CIDR filter.

  • route-search.supernet-of-match - The routes with a CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29.

  • state - The state of the route (active | blackhole).

  • type - The type of route (propagated | static).
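The filter names above map directly onto the generated `Filter` type. A sketch of building a route search, assuming the request fields defined in this module and hypothetical IDs:

```rust
use rusoto_ec2::{Filter, SearchTransitGatewayRoutesRequest};

fn exact_match_request(route_table_id: &str, cidr: &str) -> SearchTransitGatewayRoutesRequest {
    SearchTransitGatewayRoutesRequest {
        transit_gateway_route_table_id: route_table_id.to_owned(),
        // Each filter pairs one of the names listed above with its values.
        filters: vec![
            Filter {
                name: Some("route-search.exact-match".to_owned()),
                values: Some(vec![cidr.to_owned()]),
            },
            Filter {
                name: Some("type".to_owned()),
                values: Some(vec!["static".to_owned()]),
            },
        ],
        ..Default::default()
    }
}
```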

pub filters: Vec, ///

The maximum number of routes to return.

pub max_results: Option, @@ -41943,6 +44706,30 @@ impl SecurityGroupStringListSerializer { } } +#[derive(Default, Debug, Clone, PartialEq)] +pub struct SendDiagnosticInterruptRequest { + ///

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

+ pub dry_run: Option, + ///

The ID of the instance.
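A hedged usage sketch for this new operation, assuming the matching `send_diagnostic_interrupt` client method generated elsewhere in this change; the instance ID is hypothetical. The call delivers a diagnostic interrupt (comparable to a non-maskable interrupt) to the instance:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, SendDiagnosticInterruptRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = SendDiagnosticInterruptRequest {
        instance_id: "i-1234567890abcdef0".to_owned(), // hypothetical ID
        dry_run: Some(true), // set to None to actually deliver the interrupt
    };
    if let Err(err) = client.send_diagnostic_interrupt(request).sync() {
        println!("send_diagnostic_interrupt: {}", err);
    }
}
```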

+ pub instance_id: String, +} + +/// Serialize `SendDiagnosticInterruptRequest` contents to a `SignedRequest`. +struct SendDiagnosticInterruptRequestSerializer; +impl SendDiagnosticInterruptRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &SendDiagnosticInterruptRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.dry_run { + params.put(&format!("{}{}", prefix, "DryRun"), &field_value); + } + params.put(&format!("{}{}", prefix, "InstanceId"), &obj.instance_id); + } +} + ///

Describes a service configuration for a VPC endpoint service.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ServiceConfiguration { @@ -42299,13 +45086,13 @@ impl SlotStartTimeRangeRequestSerializer { ///

Describes a snapshot.

#[derive(Default, Debug, Clone, PartialEq)] pub struct Snapshot { - ///

The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.

+ ///

The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by DescribeSnapshots.

pub data_encryption_key_id: Option, ///

The description for the snapshot.

pub description: Option, ///

Indicates whether the snapshot is encrypted.

pub encrypted: Option, - ///

The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

+ ///

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

pub kms_key_id: Option, ///

Value from an Amazon-maintained list (amazon | self | all | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

pub owner_alias: Option, @@ -42319,7 +45106,7 @@ pub struct Snapshot { pub start_time: Option, ///

The snapshot state.

pub state: Option, - ///

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

+ ///

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots.

pub state_message: Option, ///

Any tags assigned to the snapshot.

pub tags: Option>, @@ -42536,12 +45323,12 @@ impl SnapshotIdStringListSerializer { } } -///

Object that contains information about a snapshot.

+///

Information about a snapshot.

#[derive(Default, Debug, Clone, PartialEq)] pub struct SnapshotInfo { ///

Description specified by the CreateSnapshotRequest that has been applied to all snapshots.

pub description: Option, - ///

Boolean that specifies whether or not this snapshot is encrypted.

+ ///

Indicates whether the snapshot is encrypted.

pub encrypted: Option, ///

Account id used when creating this snapshot.

pub owner_id: Option, @@ -42805,7 +45592,7 @@ impl SpotDatafeedSubscriptionDeserializer { pub struct SpotFleetLaunchSpecification { ///

Deprecated.

pub addressing_type: Option, - ///

One or more block devices that are mapped to the Spot instances. You can't specify both a snapshot ID and an encryption value. This is because only blank volumes can be encrypted on creation. If a snapshot is the basis for a volume, it is not blank and its encryption status is used for the volume encryption status.

+ ///

One or more block devices that are mapped to the Spot Instances. You can't specify both a snapshot ID and an encryption value. This is because only blank volumes can be encrypted on creation. If a snapshot is the basis for a volume, it is not blank and its encryption status is used for the volume encryption status.

pub block_device_mappings: Option>, ///

Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

Default: false

pub ebs_optimized: Option, @@ -42831,7 +45618,7 @@ pub struct SpotFleetLaunchSpecification { pub security_groups: Option>, ///

The maximum price per unit hour that you are willing to pay for a Spot Instance. If this value is not specified, the default is the Spot price specified for the fleet. To determine the Spot price per unit hour, divide the Spot price by the value of WeightedCapacity.

pub spot_price: Option, - ///

The ID of the subnet in which to launch the instances. To specify multiple subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08".

+ ///

The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, "subnet-1234abcdeexample1, subnet-0987cdef6example2".
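Despite holding multiple IDs, this remains a single string field, so joining the IDs is enough. A small sketch with hypothetical subnet IDs:

```rust
use rusoto_ec2::SpotFleetLaunchSpecification;

fn spec_for_subnets(subnet_ids: &[&str]) -> SpotFleetLaunchSpecification {
    SpotFleetLaunchSpecification {
        // Multiple subnets go into the one subnet_id field, comma-separated.
        subnet_id: Some(subnet_ids.join(", ")),
        ..Default::default()
    }
}

fn main() {
    let spec = spec_for_subnets(&["subnet-1234abcdeexample1", "subnet-0987cdef6example2"]);
    assert_eq!(
        spec.subnet_id,
        Some("subnet-1234abcdeexample1, subnet-0987cdef6example2".to_owned())
    );
}
```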

pub subnet_id: Option, ///

The tags to apply during creation.

pub tag_specifications: Option>, @@ -43142,7 +45929,7 @@ impl SpotFleetRequestConfigDeserializer { ///

Describes the configuration of a Spot Fleet request.

#[derive(Default, Debug, Clone, PartialEq)] pub struct SpotFleetRequestConfigData { - ///

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowestPrice.

+ ///

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet request.

If the allocation strategy is lowestPrice, Spot Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, Spot Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

pub allocation_strategy: Option, ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

pub client_token: Option, @@ -43166,10 +45953,14 @@ pub struct SpotFleetRequestConfigData { pub on_demand_allocation_strategy: Option, ///

The number of On-Demand units fulfilled by this request compared to the set target On-Demand capacity.

pub on_demand_fulfilled_capacity: Option, + ///

The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

+ pub on_demand_max_total_price: Option, ///

The number of On-Demand units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

pub on_demand_target_capacity: Option, ///

Indicates whether Spot Fleet should replace unhealthy instances.

pub replace_unhealthy_instances: Option, + ///

The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.
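A hedged sketch of the budget cap described above for a Spot Fleet, using the two new fields on `SpotFleetRequestConfigData`; the role ARN, capacity, and prices are illustrative:

```rust
use rusoto_ec2::SpotFleetRequestConfigData;

fn budgeted_fleet_config(iam_fleet_role: &str) -> SpotFleetRequestConfigData {
    SpotFleetRequestConfigData {
        iam_fleet_role: iam_fleet_role.to_owned(),
        target_capacity: 10,
        // Cap total hourly spend: up to $1.00/hr of On-Demand capacity
        // plus up to $2.50/hr of Spot capacity.
        on_demand_max_total_price: Some("1.00".to_owned()),
        spot_max_total_price: Some("2.50".to_owned()),
        ..Default::default()
    }
}
```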

+ pub spot_max_total_price: Option, ///

The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.

pub spot_price: Option, ///

The number of units to request for the Spot Fleet. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

@@ -43271,6 +46062,12 @@ impl SpotFleetRequestConfigDataDeserializer { stack, )?); } + "onDemandMaxTotalPrice" => { + obj.on_demand_max_total_price = Some(StringDeserializer::deserialize( + "onDemandMaxTotalPrice", + stack, + )?); + } "onDemandTargetCapacity" => { obj.on_demand_target_capacity = Some(IntegerDeserializer::deserialize( "onDemandTargetCapacity", @@ -43283,6 +46080,10 @@ impl SpotFleetRequestConfigDataDeserializer { stack, )?); } + "spotMaxTotalPrice" => { + obj.spot_max_total_price = + Some(StringDeserializer::deserialize("spotMaxTotalPrice", stack)?); + } "spotPrice" => { obj.spot_price = Some(StringDeserializer::deserialize("spotPrice", stack)?); } @@ -43389,6 +46190,12 @@ impl SpotFleetRequestConfigDataSerializer { &field_value, ); } + if let Some(ref field_value) = obj.on_demand_max_total_price { + params.put( + &format!("{}{}", prefix, "OnDemandMaxTotalPrice"), + &field_value, + ); + } if let Some(ref field_value) = obj.on_demand_target_capacity { params.put( &format!("{}{}", prefix, "OnDemandTargetCapacity"), @@ -43401,6 +46208,9 @@ impl SpotFleetRequestConfigDataSerializer { &field_value, ); } + if let Some(ref field_value) = obj.spot_max_total_price { + params.put(&format!("{}{}", prefix, "SpotMaxTotalPrice"), &field_value); + } if let Some(ref field_value) = obj.spot_price { params.put(&format!("{}{}", prefix, "SpotPrice"), &field_value); } @@ -43858,12 +46668,14 @@ impl SpotMarketOptionsSerializer { ///

Describes the configuration of Spot Instances in an EC2 Fleet.

#[derive(Default, Debug, Clone, PartialEq)] pub struct SpotOptions { - ///

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowest-price.

+ ///

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

pub allocation_strategy: Option, ///

The behavior when a Spot Instance is interrupted. The default is terminate.

pub instance_interruption_behavior: Option, ///

The number of Spot pools across which to allocate your target Spot capacity. Valid only when AllocationStrategy is set to lowestPrice. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

pub instance_pools_to_use_count: Option, + ///

The maximum amount per hour for Spot Instances that you're willing to pay.

+ pub max_total_price: Option, ///

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

pub min_target_capacity: Option, ///

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

@@ -43901,6 +46713,10 @@ impl SpotOptionsDeserializer { stack, )?); } + "maxTotalPrice" => { + obj.max_total_price = + Some(StringDeserializer::deserialize("maxTotalPrice", stack)?); + } "minTargetCapacity" => { obj.min_target_capacity = Some(IntegerDeserializer::deserialize( "minTargetCapacity", @@ -43928,12 +46744,14 @@ impl SpotOptionsDeserializer { ///

Describes the configuration of Spot Instances in an EC2 Fleet request.

#[derive(Default, Debug, Clone, PartialEq)] pub struct SpotOptionsRequest { - ///

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowestPrice.

+ ///

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

pub allocation_strategy: Option, ///

The behavior when a Spot Instance is interrupted. The default is terminate.

pub instance_interruption_behavior: Option, ///

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

pub instance_pools_to_use_count: Option, + ///

The maximum amount per hour for Spot Instances that you're willing to pay.

+ pub max_total_price: Option, ///

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

pub min_target_capacity: Option, ///

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

@@ -43966,6 +46784,9 @@ impl SpotOptionsRequestSerializer { &field_value, ); } + if let Some(ref field_value) = obj.max_total_price { + params.put(&format!("{}{}", prefix, "MaxTotalPrice"), &field_value); + } if let Some(ref field_value) = obj.min_target_capacity { params.put(&format!("{}{}", prefix, "MinTargetCapacity"), &field_value); } @@ -44987,7 +47808,7 @@ impl TagListSerializer { ///

The tags to apply to a resource when the resource is being created.

#[derive(Default, Debug, Clone, PartialEq)] pub struct TagSpecification { - ///

The type of resource to tag. Currently, the resource types that support tagging on creation are fleet, dedicated-host, instance, snapshot, and volume. To tag a resource after it has been created, see CreateTags.

+ ///

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host | fleet | instance | launch-template | snapshot | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume.

To tag a resource after it has been created, see CreateTags.
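A small sketch of tagging at creation time, assuming the `TagSpecification` and `Tag` shapes in this module; the key and value are illustrative:

```rust
use rusoto_ec2::{Tag, TagSpecification};

fn launch_tags() -> Vec<TagSpecification> {
    vec![TagSpecification {
        // Must be one of the creation-taggable resource types listed above.
        resource_type: Some("instance".to_owned()),
        tags: Some(vec![Tag {
            key: Some("project".to_owned()),
            value: Some("traffic-mirroring-rollout".to_owned()),
        }]),
        ..Default::default()
    }]
}
```

The same list can be passed wherever a request accepts `tag_specifications`, such as `RunInstancesRequest`.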

pub resource_type: Option, ///

The tags to apply to the resource.

pub tags: Option>, @@ -45022,14 +47843,14 @@ impl TagSpecificationListSerializer { } } -///

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

+///

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptions and SpotOptions.

#[derive(Default, Debug, Clone, PartialEq)] pub struct TargetCapacitySpecification { ///

The default TotalTargetCapacity, which is either Spot or On-Demand.

pub default_target_capacity_type: Option, - ///

The number of On-Demand units to request.

+ ///

The number of On-Demand units to request. If you specify a target capacity for Spot units, you cannot specify a target capacity for On-Demand units.

pub on_demand_target_capacity: Option, - ///

The maximum number of Spot units to launch.

+ ///

The maximum number of Spot units to launch. If you specify a target capacity for On-Demand units, you cannot specify a target capacity for Spot units.

pub spot_target_capacity: Option, ///

The number of units to request, filled using DefaultTargetCapacityType.

pub total_target_capacity: Option, @@ -45079,7 +47900,7 @@ impl TargetCapacitySpecificationDeserializer { ) } } -///

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

+///

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.
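A minimal sketch of the pairing described above for EC2 Fleet: target capacity on the specification, budget caps on the matching options structs. Field names follow the code generated in this module; the capacities and prices are illustrative, and only the total target capacity is set because the per-type targets are mutually exclusive:

```rust
use rusoto_ec2::{
    OnDemandOptionsRequest, SpotOptionsRequest, TargetCapacitySpecificationRequest,
};

fn fleet_capacity_and_budget() -> (
    TargetCapacitySpecificationRequest,
    OnDemandOptionsRequest,
    SpotOptionsRequest,
) {
    let capacity = TargetCapacitySpecificationRequest {
        total_target_capacity: 10,
        // Unfilled capacity defaults to Spot; On-Demand and Spot target
        // capacities cannot both be specified, so leave them unset here.
        default_target_capacity_type: Some("spot".to_owned()),
        on_demand_target_capacity: None,
        spot_target_capacity: None,
    };
    let on_demand = OnDemandOptionsRequest {
        max_total_price: Some("1.00".to_owned()), // $/hour cap for On-Demand
        ..Default::default()
    };
    let spot = SpotOptionsRequest {
        max_total_price: Some("2.50".to_owned()), // $/hour cap for Spot
        ..Default::default()
    };
    (capacity, on_demand, spot)
}
```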

#[derive(Default, Debug, Clone, PartialEq)] pub struct TargetCapacitySpecificationRequest { ///

The default TotalTargetCapacity, which is either Spot or On-Demand.

@@ -45669,6 +48490,564 @@ impl TerminateInstancesResultDeserializer { ) } } +struct TrafficDirectionDeserializer; +impl TrafficDirectionDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} +///

Describes the Traffic Mirror filter.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct TrafficMirrorFilter { + ///

The description of the Traffic Mirror filter.

+ pub description: Option, + ///

Information about the egress rules that are associated with the Traffic Mirror filter.

+ pub egress_filter_rules: Option>, + ///

Information about the ingress rules that are associated with the Traffic Mirror filter.

+ pub ingress_filter_rules: Option>, + ///

The network service traffic that is associated with the Traffic Mirror filter.

+ pub network_services: Option>, + ///

The tags assigned to the Traffic Mirror filter.

+ pub tags: Option>, + ///

The ID of the Traffic Mirror filter.

+ pub traffic_mirror_filter_id: Option, +} + +struct TrafficMirrorFilterDeserializer; +impl TrafficMirrorFilterDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TrafficMirrorFilter, _>(tag_name, stack, |name, stack, obj| { + match name { + "description" => { + obj.description = Some(StringDeserializer::deserialize("description", stack)?); + } + "egressFilterRuleSet" => { + obj.egress_filter_rules.get_or_insert(vec![]).extend( + TrafficMirrorFilterRuleListDeserializer::deserialize( + "egressFilterRuleSet", + stack, + )?, + ); + } + "ingressFilterRuleSet" => { + obj.ingress_filter_rules.get_or_insert(vec![]).extend( + TrafficMirrorFilterRuleListDeserializer::deserialize( + "ingressFilterRuleSet", + stack, + )?, + ); + } + "networkServiceSet" => { + obj.network_services.get_or_insert(vec![]).extend( + TrafficMirrorNetworkServiceListDeserializer::deserialize( + "networkServiceSet", + stack, + )?, + ); + } + "tagSet" => { + obj.tags + .get_or_insert(vec![]) + .extend(TagListDeserializer::deserialize("tagSet", stack)?); + } + "trafficMirrorFilterId" => { + obj.traffic_mirror_filter_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +///

Describes the Traffic Mirror rule.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct TrafficMirrorFilterRule { + ///

The description of the Traffic Mirror rule.

+ pub description: Option, + ///

The destination CIDR block assigned to the Traffic Mirror rule.

+ pub destination_cidr_block: Option, + ///

The destination port range assigned to the Traffic Mirror rule.

+ pub destination_port_range: Option, + ///

The protocol assigned to the Traffic Mirror rule.

+ pub protocol: Option, + ///

The action assigned to the Traffic Mirror rule.

+ pub rule_action: Option, + ///

The rule number of the Traffic Mirror rule.

+ pub rule_number: Option, + ///

The source CIDR block assigned to the Traffic Mirror rule.

+ pub source_cidr_block: Option, + ///

The source port range assigned to the Traffic Mirror rule.

+ pub source_port_range: Option, + ///

The traffic direction assigned to the Traffic Mirror rule.

+ pub traffic_direction: Option, + ///

The ID of the Traffic Mirror filter that the rule is associated with.

+ pub traffic_mirror_filter_id: Option, + ///

The ID of the Traffic Mirror rule.

+ pub traffic_mirror_filter_rule_id: Option, +} + +struct TrafficMirrorFilterRuleDeserializer; +impl TrafficMirrorFilterRuleDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TrafficMirrorFilterRule, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "description" => { + obj.description = + Some(StringDeserializer::deserialize("description", stack)?); + } + "destinationCidrBlock" => { + obj.destination_cidr_block = Some(StringDeserializer::deserialize( + "destinationCidrBlock", + stack, + )?); + } + "destinationPortRange" => { + obj.destination_port_range = + Some(TrafficMirrorPortRangeDeserializer::deserialize( + "destinationPortRange", + stack, + )?); + } + "protocol" => { + obj.protocol = Some(IntegerDeserializer::deserialize("protocol", stack)?); + } + "ruleAction" => { + obj.rule_action = Some(TrafficMirrorRuleActionDeserializer::deserialize( + "ruleAction", + stack, + )?); + } + "ruleNumber" => { + obj.rule_number = + Some(IntegerDeserializer::deserialize("ruleNumber", stack)?); + } + "sourceCidrBlock" => { + obj.source_cidr_block = + Some(StringDeserializer::deserialize("sourceCidrBlock", stack)?); + } + "sourcePortRange" => { + obj.source_port_range = + Some(TrafficMirrorPortRangeDeserializer::deserialize( + "sourcePortRange", + stack, + )?); + } + "trafficDirection" => { + obj.traffic_direction = Some(TrafficDirectionDeserializer::deserialize( + "trafficDirection", + stack, + )?); + } + "trafficMirrorFilterId" => { + obj.traffic_mirror_filter_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterId", + stack, + )?); + } + "trafficMirrorFilterRuleId" => { + obj.traffic_mirror_filter_rule_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterRuleId", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} + +/// Serialize `TrafficMirrorFilterRuleFieldList` contents to a `SignedRequest`. 
+struct TrafficMirrorFilterRuleFieldListSerializer; +impl TrafficMirrorFilterRuleFieldListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + params.put(&key, &obj); + } + } +} + +struct TrafficMirrorFilterRuleListDeserializer; +impl TrafficMirrorFilterRuleListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TrafficMirrorFilterRuleDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +struct TrafficMirrorFilterSetDeserializer; +impl TrafficMirrorFilterSetDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TrafficMirrorFilterDeserializer::deserialize("item", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +struct TrafficMirrorNetworkServiceDeserializer; +impl TrafficMirrorNetworkServiceDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} +struct TrafficMirrorNetworkServiceListDeserializer; +impl TrafficMirrorNetworkServiceListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TrafficMirrorNetworkServiceDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} + +/// Serialize `TrafficMirrorNetworkServiceList` contents to a `SignedRequest`. +struct TrafficMirrorNetworkServiceListSerializer; +impl TrafficMirrorNetworkServiceListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + params.put(&key, &obj); + } + } +} + +///

Describes the Traffic Mirror port range.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct TrafficMirrorPortRange { + ///

The start of the Traffic Mirror port range. This applies to the TCP and UDP protocols.

+ pub from_port: Option, + ///

The end of the Traffic Mirror port range. This applies to the TCP and UDP protocols.

+ pub to_port: Option, +} + +struct TrafficMirrorPortRangeDeserializer; +impl TrafficMirrorPortRangeDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TrafficMirrorPortRange, _>(tag_name, stack, |name, stack, obj| { + match name { + "fromPort" => { + obj.from_port = Some(IntegerDeserializer::deserialize("fromPort", stack)?); + } + "toPort" => { + obj.to_port = Some(IntegerDeserializer::deserialize("toPort", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +///

Information about the Traffic Mirror filter rule port range.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct TrafficMirrorPortRangeRequest { + ///

The first port in the Traffic Mirror port range. This applies to the TCP and UDP protocols.

+ pub from_port: Option, + ///

The last port in the Traffic Mirror port range. This applies to the TCP and UDP protocols.
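A tiny sketch of the request form, assuming the two optional port fields defined here; port 443 is illustrative:

```rust
use rusoto_ec2::TrafficMirrorPortRangeRequest;

fn https_only() -> TrafficMirrorPortRangeRequest {
    // Serialized by the generated code as FromPort=443 / ToPort=443 under the
    // rule's DestinationPortRange (or SourcePortRange) prefix.
    TrafficMirrorPortRangeRequest {
        from_port: Some(443),
        to_port: Some(443),
    }
}
```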

+ pub to_port: Option, +} + +/// Serialize `TrafficMirrorPortRangeRequest` contents to a `SignedRequest`. +struct TrafficMirrorPortRangeRequestSerializer; +impl TrafficMirrorPortRangeRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &TrafficMirrorPortRangeRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.from_port { + params.put(&format!("{}{}", prefix, "FromPort"), &field_value); + } + if let Some(ref field_value) = obj.to_port { + params.put(&format!("{}{}", prefix, "ToPort"), &field_value); + } + } +} + +struct TrafficMirrorRuleActionDeserializer; +impl TrafficMirrorRuleActionDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} +///
<p>Describes a Traffic Mirror session.</p>
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct TrafficMirrorSession {
+    /// <p>The description of the Traffic Mirror session.</p>
+    pub description: Option<String>,
+    /// <p>The ID of the Traffic Mirror session's network interface.</p>
+    pub network_interface_id: Option<String>,
+    /// <p>The ID of the account that owns the Traffic Mirror session.</p>
+    pub owner_id: Option<String>,
+    /// <p>The number of bytes in each packet to mirror. These are the bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.</p>
+    pub packet_length: Option<i64>,
+    /// <p>The session number determines the order in which sessions are evaluated when an interface is used by multiple sessions. The first session with a matching filter is the one that mirrors the packets.</p> <p>Valid values are 1-32766.</p>
+    pub session_number: Option<i64>,
+    /// <p>The tags assigned to the Traffic Mirror session.</p>
+    pub tags: Option<Vec<Tag>>,
+    /// <p>The ID of the Traffic Mirror filter.</p>
+    pub traffic_mirror_filter_id: Option<String>,
+    /// <p>The ID for the Traffic Mirror session.</p>
+    pub traffic_mirror_session_id: Option<String>,
+    /// <p>The ID of the Traffic Mirror target.</p>
+    pub traffic_mirror_target_id: Option<String>,
+    /// <p>The virtual network ID associated with the Traffic Mirror session.</p>
+ pub virtual_network_id: Option, +} + +struct TrafficMirrorSessionDeserializer; +impl TrafficMirrorSessionDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TrafficMirrorSession, _>(tag_name, stack, |name, stack, obj| { + match name { + "description" => { + obj.description = Some(StringDeserializer::deserialize("description", stack)?); + } + "networkInterfaceId" => { + obj.network_interface_id = Some(StringDeserializer::deserialize( + "networkInterfaceId", + stack, + )?); + } + "ownerId" => { + obj.owner_id = Some(StringDeserializer::deserialize("ownerId", stack)?); + } + "packetLength" => { + obj.packet_length = + Some(IntegerDeserializer::deserialize("packetLength", stack)?); + } + "sessionNumber" => { + obj.session_number = + Some(IntegerDeserializer::deserialize("sessionNumber", stack)?); + } + "tagSet" => { + obj.tags + .get_or_insert(vec![]) + .extend(TagListDeserializer::deserialize("tagSet", stack)?); + } + "trafficMirrorFilterId" => { + obj.traffic_mirror_filter_id = Some(StringDeserializer::deserialize( + "trafficMirrorFilterId", + stack, + )?); + } + "trafficMirrorSessionId" => { + obj.traffic_mirror_session_id = Some(StringDeserializer::deserialize( + "trafficMirrorSessionId", + stack, + )?); + } + "trafficMirrorTargetId" => { + obj.traffic_mirror_target_id = Some(StringDeserializer::deserialize( + "trafficMirrorTargetId", + stack, + )?); + } + "virtualNetworkId" => { + obj.virtual_network_id = + Some(IntegerDeserializer::deserialize("virtualNetworkId", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} + +/// Serialize `TrafficMirrorSessionFieldList` contents to a `SignedRequest`. +struct TrafficMirrorSessionFieldListSerializer; +impl TrafficMirrorSessionFieldListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + params.put(&key, &obj); + } + } +} + +struct TrafficMirrorSessionSetDeserializer; +impl TrafficMirrorSessionSetDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TrafficMirrorSessionDeserializer::deserialize( + "item", stack, + )?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +///
<p>Describes a Traffic Mirror target.</p>
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct TrafficMirrorTarget {
+    /// <p>Information about the Traffic Mirror target.</p>
+    pub description: Option<String>,
+    /// <p>The network interface ID that is attached to the target.</p>
+    pub network_interface_id: Option<String>,
+    /// <p>The Amazon Resource Name (ARN) of the Network Load Balancer.</p>
+    pub network_load_balancer_arn: Option<String>,
+    /// <p>The ID of the account that owns the Traffic Mirror target.</p>
+    pub owner_id: Option<String>,
+    /// <p>The tags assigned to the Traffic Mirror target.</p>
+    pub tags: Option<Vec<Tag>>,
+    /// <p>The ID of the Traffic Mirror target.</p>
+    pub traffic_mirror_target_id: Option<String>,
+    /// <p>The type of Traffic Mirror target.</p>
+ pub type_: Option, +} + +struct TrafficMirrorTargetDeserializer; +impl TrafficMirrorTargetDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TrafficMirrorTarget, _>(tag_name, stack, |name, stack, obj| { + match name { + "description" => { + obj.description = Some(StringDeserializer::deserialize("description", stack)?); + } + "networkInterfaceId" => { + obj.network_interface_id = Some(StringDeserializer::deserialize( + "networkInterfaceId", + stack, + )?); + } + "networkLoadBalancerArn" => { + obj.network_load_balancer_arn = Some(StringDeserializer::deserialize( + "networkLoadBalancerArn", + stack, + )?); + } + "ownerId" => { + obj.owner_id = Some(StringDeserializer::deserialize("ownerId", stack)?); + } + "tagSet" => { + obj.tags + .get_or_insert(vec![]) + .extend(TagListDeserializer::deserialize("tagSet", stack)?); + } + "trafficMirrorTargetId" => { + obj.traffic_mirror_target_id = Some(StringDeserializer::deserialize( + "trafficMirrorTargetId", + stack, + )?); + } + "type" => { + obj.type_ = Some(TrafficMirrorTargetTypeDeserializer::deserialize( + "type", stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +struct TrafficMirrorTargetSetDeserializer; +impl TrafficMirrorTargetSetDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TrafficMirrorTargetDeserializer::deserialize("item", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} +struct TrafficMirrorTargetTypeDeserializer; +impl TrafficMirrorTargetTypeDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} struct TrafficTypeDeserializer; impl TrafficTypeDeserializer { #[allow(unused_variables)] @@ -46904,18 +50283,174 @@ impl TransportProtocolDeserializer { Ok(obj) } } +///
<p>The VPN tunnel options.</p>
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct TunnelOption {
+    /// <p>The number of seconds after which a DPD timeout occurs.</p>
+    pub dpd_timeout_seconds: Option<i64>,
+    /// <p>The IKE versions that are permitted for the VPN tunnel.</p>
+    pub ike_versions: Option<Vec<IKEVersionsListValue>>,
+    /// <p>The external IP address of the VPN tunnel.</p>
+    pub outside_ip_address: Option<String>,
+    /// <p>The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 1 IKE negotiations.</p>
+    pub phase_1dh_group_numbers: Option<Vec<Phase1DHGroupNumbersListValue>>,
+    /// <p>The permitted encryption algorithms for the VPN tunnel for phase 1 IKE negotiations.</p>
+    pub phase_1_encryption_algorithms: Option<Vec<Phase1EncryptionAlgorithmsListValue>>,
+    /// <p>The permitted integrity algorithms for the VPN tunnel for phase 1 IKE negotiations.</p>
+    pub phase_1_integrity_algorithms: Option<Vec<Phase1IntegrityAlgorithmsListValue>>,
+    /// <p>The lifetime for phase 1 of the IKE negotiation, in seconds.</p>
+    pub phase_1_lifetime_seconds: Option<i64>,
+    /// <p>The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 2 IKE negotiations.</p>
+    pub phase_2dh_group_numbers: Option<Vec<Phase2DHGroupNumbersListValue>>,
+    /// <p>The permitted encryption algorithms for the VPN tunnel for phase 2 IKE negotiations.</p>
+    pub phase_2_encryption_algorithms: Option<Vec<Phase2EncryptionAlgorithmsListValue>>,
+    /// <p>The permitted integrity algorithms for the VPN tunnel for phase 2 IKE negotiations.</p>
+    pub phase_2_integrity_algorithms: Option<Vec<Phase2IntegrityAlgorithmsListValue>>,
+    /// <p>The lifetime for phase 2 of the IKE negotiation, in seconds.</p>
+    pub phase_2_lifetime_seconds: Option<i64>,
+    /// <p>The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.</p>
+    pub pre_shared_key: Option<String>,
+    /// <p>The percentage of the rekey window determined by RekeyMarginTimeSeconds during which the rekey time is randomly selected.</p>
+    pub rekey_fuzz_percentage: Option<i64>,
+    /// <p>The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey.</p>
+    pub rekey_margin_time_seconds: Option<i64>,
+    /// <p>The number of packets in an IKE replay window.</p>
+    pub replay_window_size: Option<i64>,
+    /// <p>The range of inside IP addresses for the tunnel.</p>
+ pub tunnel_inside_cidr: Option, +} -/// Serialize `TunnelOptionsList` contents to a `SignedRequest`. -struct TunnelOptionsListSerializer; -impl TunnelOptionsListSerializer { - fn serialize(params: &mut Params, name: &str, obj: &Vec) { - for (index, obj) in obj.iter().enumerate() { - let key = format!("{}.{}", name, index + 1); - VpnTunnelOptionsSpecificationSerializer::serialize(params, &key, obj); - } +struct TunnelOptionDeserializer; +impl TunnelOptionDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, TunnelOption, _>(tag_name, stack, |name, stack, obj| { + match name { + "dpdTimeoutSeconds" => { + obj.dpd_timeout_seconds = Some(IntegerDeserializer::deserialize( + "dpdTimeoutSeconds", + stack, + )?); + } + "ikeVersionSet" => { + obj.ike_versions.get_or_insert(vec![]).extend( + IKEVersionsListDeserializer::deserialize("ikeVersionSet", stack)?, + ); + } + "outsideIpAddress" => { + obj.outside_ip_address = + Some(StringDeserializer::deserialize("outsideIpAddress", stack)?); + } + "phase1DHGroupNumberSet" => { + obj.phase_1dh_group_numbers.get_or_insert(vec![]).extend( + Phase1DHGroupNumbersListDeserializer::deserialize( + "phase1DHGroupNumberSet", + stack, + )?, + ); + } + "phase1EncryptionAlgorithmSet" => { + obj.phase_1_encryption_algorithms + .get_or_insert(vec![]) + .extend(Phase1EncryptionAlgorithmsListDeserializer::deserialize( + "phase1EncryptionAlgorithmSet", + stack, + )?); + } + "phase1IntegrityAlgorithmSet" => { + obj.phase_1_integrity_algorithms + .get_or_insert(vec![]) + .extend(Phase1IntegrityAlgorithmsListDeserializer::deserialize( + "phase1IntegrityAlgorithmSet", + stack, + )?); + } + "phase1LifetimeSeconds" => { + obj.phase_1_lifetime_seconds = Some(IntegerDeserializer::deserialize( + "phase1LifetimeSeconds", + stack, + )?); + } + "phase2DHGroupNumberSet" => { + obj.phase_2dh_group_numbers.get_or_insert(vec![]).extend( + Phase2DHGroupNumbersListDeserializer::deserialize( + "phase2DHGroupNumberSet", + stack, + )?, + ); + } + "phase2EncryptionAlgorithmSet" => { + obj.phase_2_encryption_algorithms + .get_or_insert(vec![]) + .extend(Phase2EncryptionAlgorithmsListDeserializer::deserialize( + "phase2EncryptionAlgorithmSet", + stack, + )?); + } + "phase2IntegrityAlgorithmSet" => { + obj.phase_2_integrity_algorithms + .get_or_insert(vec![]) + .extend(Phase2IntegrityAlgorithmsListDeserializer::deserialize( + "phase2IntegrityAlgorithmSet", + stack, + )?); + } + "phase2LifetimeSeconds" => { + obj.phase_2_lifetime_seconds = Some(IntegerDeserializer::deserialize( + "phase2LifetimeSeconds", + stack, + )?); + } + "preSharedKey" => { + obj.pre_shared_key = + Some(StringDeserializer::deserialize("preSharedKey", stack)?); + } + "rekeyFuzzPercentage" => { + obj.rekey_fuzz_percentage = Some(IntegerDeserializer::deserialize( + "rekeyFuzzPercentage", + stack, + )?); + } + "rekeyMarginTimeSeconds" => { + obj.rekey_margin_time_seconds = Some(IntegerDeserializer::deserialize( + "rekeyMarginTimeSeconds", + stack, + )?); + } + "replayWindowSize" => { + obj.replay_window_size = + Some(IntegerDeserializer::deserialize("replayWindowSize", stack)?); + } + "tunnelInsideCidr" => { + obj.tunnel_inside_cidr = + Some(StringDeserializer::deserialize("tunnelInsideCidr", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +struct TunnelOptionsListDeserializer; +impl TunnelOptionsListDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { 
+ deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "item" { + obj.push(TunnelOptionDeserializer::deserialize("item", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) } } - #[derive(Default, Debug, Clone, PartialEq)] pub struct UnassignIpv6AddressesRequest { ///
<p>The IPv6 addresses to unassign from the network interface.</p>
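The list serializers added throughout this diff all follow the same EC2 Query convention: a `Vec` is flattened into `"<Name>.<1-based index>"` parameters, which is what the repeated `format!("{}.{}", name, index + 1)` calls implement. A minimal standalone sketch of that convention, using a plain `BTreeMap` as a stand-in for rusoto's `Params` map:

    use std::collections::BTreeMap;

    /// Flatten a list into EC2 Query-style indexed parameters.
    fn serialize_list(params: &mut BTreeMap<String, String>, name: &str, obj: &[String]) {
        for (index, value) in obj.iter().enumerate() {
            // Produces keys such as "NetworkService.1", "NetworkService.2", ...
            params.insert(format!("{}.{}", name, index + 1), value.clone());
        }
    }

    fn main() {
        let mut params = BTreeMap::new();
        serialize_list(&mut params, "NetworkService", &["amazon-dns".to_string()]);
        assert_eq!(params["NetworkService.1"], "amazon-dns");
    }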
@@ -47712,6 +51247,8 @@ impl VersionStringListSerializer {
 pub struct VgwTelemetry {
     /// <p>The number of accepted routes.</p>
     pub accepted_route_count: Option<i64>,
+    /// <p>The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate.</p>
+    pub certificate_arn: Option<String>,
     /// <p>The date and time of the last change in status.</p>
     pub last_status_change: Option<String>,
     /// <p>The Internet-routable IP address of the virtual private gateway's outside interface.</p>
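A hedged usage sketch, not part of the diff: with `certificate_arn` added to `VgwTelemetry`, the certificate ARN should be readable from DescribeVpnConnections telemetry. This assumes the rusoto client API of this era (blocking `.sync()` calls on `Ec2Client`):

    use rusoto_core::Region;
    use rusoto_ec2::{DescribeVpnConnectionsRequest, Ec2, Ec2Client};

    fn main() {
        let client = Ec2Client::new(Region::UsEast1);
        let response = client
            .describe_vpn_connections(DescribeVpnConnectionsRequest::default())
            .sync()
            .expect("DescribeVpnConnections failed");
        for connection in response.vpn_connections.unwrap_or_default() {
            for telemetry in connection.vgw_telemetry.unwrap_or_default() {
                // `certificate_arn` is the field added in the hunk above.
                println!(
                    "tunnel {:?}: certificate {:?}",
                    telemetry.outside_ip_address, telemetry.certificate_arn
                );
            }
        }
    }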
@@ -47737,6 +51274,10 @@ impl VgwTelemetryDeserializer { stack, )?); } + "certificateArn" => { + obj.certificate_arn = + Some(StringDeserializer::deserialize("certificateArn", stack)?); + } "lastStatusChange" => { obj.last_status_change = Some(DateTimeDeserializer::deserialize( "lastStatusChange", @@ -47797,11 +51338,11 @@ pub struct Volume { pub availability_zone: Option, ///
<p>The time stamp when volume creation was initiated.</p>
     pub create_time: Option<String>,
-    /// <p>Indicates whether the volume will be encrypted.</p>
+    /// <p>Indicates whether the volume is encrypted.</p>
     pub encrypted: Option<bool>,
     /// <p>The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.</p> <p>Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS for io1 volumes, in most Regions. The maximum IOPS for io1 of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS.</p> <p>Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.</p>
     pub iops: Option<i64>,
-    /// <p>The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.</p>
+    /// <p>The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.</p>
     pub kms_key_id: Option<String>,
     /// <p>The size of the volume, in GiBs.</p>
     pub size: Option<i64>,
@@ -48751,6 +52292,8 @@ pub struct VpcEndpoint {
     pub groups: Option<Vec<SecurityGroupIdentifier>>,
     /// <p>(Interface endpoint) One or more network interfaces for the endpoint.</p>
     pub network_interface_ids: Option<Vec<String>>,
+    /// <p>The ID of the AWS account that owns the VPC endpoint.</p>
+    pub owner_id: Option<String>,
     /// <p>The policy document associated with the endpoint, if applicable.</p>
     pub policy_document: Option<String>,
     /// <p>(Interface endpoint) Indicates whether the VPC is associated with a private hosted zone.</p>
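A short sketch, again assuming the same `Ec2Client`/`.sync()` API, of reading the new `owner_id` field off DescribeVpcEndpoints results:

    use rusoto_core::Region;
    use rusoto_ec2::{DescribeVpcEndpointsRequest, Ec2, Ec2Client};

    fn main() {
        let client = Ec2Client::new(Region::UsEast1);
        let response = client
            .describe_vpc_endpoints(DescribeVpcEndpointsRequest::default())
            .sync()
            .expect("DescribeVpcEndpoints failed");
        for endpoint in response.vpc_endpoints.unwrap_or_default() {
            // `owner_id` is the field introduced by the hunk above.
            println!("{:?} owned by {:?}", endpoint.vpc_endpoint_id, endpoint.owner_id);
        }
    }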
@@ -48805,6 +52348,9 @@ impl VpcEndpointDeserializer { ValueStringListDeserializer::deserialize("networkInterfaceIdSet", stack)?, ); } + "ownerId" => { + obj.owner_id = Some(StringDeserializer::deserialize("ownerId", stack)?); + } "policyDocument" => { obj.policy_document = Some(StringDeserializer::deserialize("policyDocument", stack)?); @@ -48864,6 +52410,10 @@ impl VpcEndpointDeserializer { pub struct VpcEndpointConnection { ///
<p>The date and time the VPC endpoint was created.</p>
     pub creation_timestamp: Option<String>,
+    /// <p>The DNS entries for the VPC endpoint.</p>
+    pub dns_entries: Option<Vec<DnsEntry>>,
+    /// <p>The Amazon Resource Names (ARNs) of the network load balancers for the service.</p>
+    pub network_load_balancer_arns: Option<Vec<String>>,
     /// <p>The ID of the service to which the endpoint is connected.</p>
     pub service_id: Option<String>,
     /// <p>The ID of the VPC endpoint.</p>
@@ -48889,6 +52439,19 @@ impl VpcEndpointConnectionDeserializer { stack, )?); } + "dnsEntrySet" => { + obj.dns_entries + .get_or_insert(vec![]) + .extend(DnsEntrySetDeserializer::deserialize("dnsEntrySet", stack)?); + } + "networkLoadBalancerArnSet" => { + obj.network_load_balancer_arns.get_or_insert(vec![]).extend( + ValueStringListDeserializer::deserialize( + "networkLoadBalancerArnSet", + stack, + )?, + ); + } "serviceId" => { obj.service_id = Some(StringDeserializer::deserialize("serviceId", stack)?); } @@ -49443,6 +53006,8 @@ impl VpnConnectionListDeserializer { pub struct VpnConnectionOptions { ///
<p>Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.</p>
     pub static_routes_only: Option<bool>,
+    /// <p>Indicates the VPN tunnel options.</p>
+ pub tunnel_options: Option>, } struct VpnConnectionOptionsDeserializer; @@ -49458,6 +53023,11 @@ impl VpnConnectionOptionsDeserializer { obj.static_routes_only = Some(BooleanDeserializer::deserialize("staticRoutesOnly", stack)?); } + "tunnelOptionSet" => { + obj.tunnel_options.get_or_insert(vec![]).extend( + TunnelOptionsListDeserializer::deserialize("tunnelOptionSet", stack)?, + ); + } _ => skip_tree(stack), } Ok(()) @@ -49486,7 +53056,7 @@ impl VpnConnectionOptionsSpecificationSerializer { params.put(&format!("{}{}", prefix, "StaticRoutesOnly"), &field_value); } if let Some(ref field_value) = obj.tunnel_options { - TunnelOptionsListSerializer::serialize( + VpnTunnelOptionsSpecificationsListSerializer::serialize( params, &format!("{}{}", prefix, "TunnelOptions"), field_value, @@ -49690,8 +53260,34 @@ impl VpnStaticRouteSourceDeserializer { ///
<p>The tunnel options for a VPN connection.</p>
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct VpnTunnelOptionsSpecification {
-    /// <p>The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.</p> <p>Constraints: Allowed characters are alphanumeric characters and ._. Must be between 8 and 64 characters in length and cannot start with zero (0).</p>
+    /// <p>The number of seconds after which a DPD timeout occurs.</p> <p>Constraints: A value between 0 and 30.</p> <p>Default: 30</p>
+    pub dpd_timeout_seconds: Option<i64>,
+    /// <p>The IKE versions that are permitted for the VPN tunnel.</p> <p>Valid values: ikev1 | ikev2</p>
+    pub ike_versions: Option<Vec<IKEVersionsRequestListValue>>,
+    /// <p>One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.</p> <p>Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24</p>
+    pub phase_1dh_group_numbers: Option<Vec<Phase1DHGroupNumbersRequestListValue>>,
+    /// <p>One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.</p> <p>Valid values: AES128 | AES256</p>
+    pub phase_1_encryption_algorithms: Option<Vec<Phase1EncryptionAlgorithmsRequestListValue>>,
+    /// <p>One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.</p> <p>Valid values: SHA1 | SHA2-256</p>
+    pub phase_1_integrity_algorithms: Option<Vec<Phase1IntegrityAlgorithmsRequestListValue>>,
+    /// <p>The lifetime for phase 1 of the IKE negotiation, in seconds.</p> <p>Constraints: A value between 900 and 28,800.</p> <p>Default: 28800</p>
+    pub phase_1_lifetime_seconds: Option<i64>,
+    /// <p>One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.</p> <p>Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24</p>
+    pub phase_2dh_group_numbers: Option<Vec<Phase2DHGroupNumbersRequestListValue>>,
+    /// <p>One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.</p> <p>Valid values: AES128 | AES256</p>
+    pub phase_2_encryption_algorithms: Option<Vec<Phase2EncryptionAlgorithmsRequestListValue>>,
+    /// <p>One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.</p> <p>Valid values: SHA1 | SHA2-256</p>
+    pub phase_2_integrity_algorithms: Option<Vec<Phase2IntegrityAlgorithmsRequestListValue>>,
+    /// <p>The lifetime for phase 2 of the IKE negotiation, in seconds.</p> <p>Constraints: A value between 900 and 3,600. The value must be less than the value for Phase1LifetimeSeconds.</p> <p>Default: 3600</p>
+    pub phase_2_lifetime_seconds: Option<i64>,
+    /// <p>The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.</p> <p>Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).</p>
     pub pre_shared_key: Option<String>,
+    /// <p>The percentage of the rekey window (determined by RekeyMarginTimeSeconds) during which the rekey time is randomly selected.</p> <p>Constraints: A value between 0 and 100.</p> <p>Default: 100</p>
+    pub rekey_fuzz_percentage: Option<i64>,
+    /// <p>The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.</p> <p>Constraints: A value between 60 and half of Phase2LifetimeSeconds.</p> <p>Default: 540</p>
+    pub rekey_margin_time_seconds: Option<i64>,
+    /// <p>The number of packets in an IKE replay window.</p> <p>Constraints: A value between 64 and 2048.</p> <p>Default: 1024</p>
+    pub replay_window_size: Option<i64>,
     /// <p>The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway.</p> <p>Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following CIDR blocks are reserved and cannot be used:</p> <ul> <li><code>169.254.0.0/30</code></li> <li><code>169.254.1.0/30</code></li> <li><code>169.254.2.0/30</code></li> <li><code>169.254.3.0/30</code></li> <li><code>169.254.4.0/30</code></li> <li><code>169.254.5.0/30</code></li> <li><code>169.254.169.252/30</code></li> </ul>
pub tunnel_inside_cidr: Option, } @@ -49705,15 +53301,105 @@ impl VpnTunnelOptionsSpecificationSerializer { prefix.push_str("."); } + if let Some(ref field_value) = obj.dpd_timeout_seconds { + params.put(&format!("{}{}", prefix, "DPDTimeoutSeconds"), &field_value); + } + if let Some(ref field_value) = obj.ike_versions { + IKEVersionsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "IKEVersion"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1dh_group_numbers { + Phase1DHGroupNumbersRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1DHGroupNumber"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_encryption_algorithms { + Phase1EncryptionAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1EncryptionAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_integrity_algorithms { + Phase1IntegrityAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase1IntegrityAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_1_lifetime_seconds { + params.put( + &format!("{}{}", prefix, "Phase1LifetimeSeconds"), + &field_value, + ); + } + if let Some(ref field_value) = obj.phase_2dh_group_numbers { + Phase2DHGroupNumbersRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2DHGroupNumber"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_encryption_algorithms { + Phase2EncryptionAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2EncryptionAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_integrity_algorithms { + Phase2IntegrityAlgorithmsRequestListSerializer::serialize( + params, + &format!("{}{}", prefix, "Phase2IntegrityAlgorithm"), + field_value, + ); + } + if let Some(ref field_value) = obj.phase_2_lifetime_seconds { + params.put( + &format!("{}{}", prefix, "Phase2LifetimeSeconds"), + &field_value, + ); + } if let Some(ref field_value) = obj.pre_shared_key { params.put(&format!("{}{}", prefix, "PreSharedKey"), &field_value); } + if let Some(ref field_value) = obj.rekey_fuzz_percentage { + params.put( + &format!("{}{}", prefix, "RekeyFuzzPercentage"), + &field_value, + ); + } + if let Some(ref field_value) = obj.rekey_margin_time_seconds { + params.put( + &format!("{}{}", prefix, "RekeyMarginTimeSeconds"), + &field_value, + ); + } + if let Some(ref field_value) = obj.replay_window_size { + params.put(&format!("{}{}", prefix, "ReplayWindowSize"), &field_value); + } if let Some(ref field_value) = obj.tunnel_inside_cidr { params.put(&format!("{}{}", prefix, "TunnelInsideCidr"), &field_value); } } } +/// Serialize `VpnTunnelOptionsSpecificationsList` contents to a `SignedRequest`. +struct VpnTunnelOptionsSpecificationsListSerializer; +impl VpnTunnelOptionsSpecificationsListSerializer { + fn serialize(params: &mut Params, name: &str, obj: &Vec) { + for (index, obj) in obj.iter().enumerate() { + let key = format!("{}.{}", name, index + 1); + VpnTunnelOptionsSpecificationSerializer::serialize(params, &key, obj); + } + } +} + #[derive(Default, Debug, Clone, PartialEq)] pub struct WithdrawByoipCidrRequest { ///
<p>The public IPv4 address range, in CIDR notation.</p>
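All of the regenerated operation errors below are empty enums whose `from_response` parses the XML error body but ultimately falls through to `RusotoError::Unknown`, so callers match on the `RusotoError` wrapper rather than on typed service variants. A hedged sketch of that calling pattern, assuming the blocking `.sync()` API:

    use rusoto_core::{Region, RusotoError};
    use rusoto_ec2::{AttachVpnGatewayRequest, Ec2, Ec2Client};

    fn main() {
        let client = Ec2Client::new(Region::UsEast1);
        // Hypothetical IDs, for illustration only.
        let request = AttachVpnGatewayRequest {
            vpc_id: "vpc-12345678".to_owned(),
            vpn_gateway_id: "vgw-12345678".to_owned(),
            ..Default::default()
        };
        match client.attach_vpn_gateway(request).sync() {
            Ok(result) => println!("{:?}", result),
            // No typed variants exist on AttachVpnGatewayError, so service
            // errors surface through the Unknown wrapper:
            Err(RusotoError::Unknown(response)) => eprintln!("EC2 returned {}", response.status),
            Err(other) => eprintln!("request failed before reaching EC2: {:?}", other),
        }
    }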
@@ -50632,22 +54318,216 @@ impl AttachVolumeError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for AttachVolumeError { +impl fmt::Display for AttachVolumeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for AttachVolumeError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by AttachVpnGateway +#[derive(Debug, PartialEq)] +pub enum AttachVpnGatewayError {} + +impl AttachVpnGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for AttachVpnGatewayError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for AttachVpnGatewayError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by AuthorizeClientVpnIngress +#[derive(Debug, PartialEq)] +pub enum AuthorizeClientVpnIngressError {} + +impl AuthorizeClientVpnIngressError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for AuthorizeClientVpnIngressError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for AuthorizeClientVpnIngressError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by AuthorizeSecurityGroupEgress +#[derive(Debug, PartialEq)] +pub enum AuthorizeSecurityGroupEgressError {} + +impl AuthorizeSecurityGroupEgressError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for AuthorizeSecurityGroupEgressError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for AuthorizeSecurityGroupEgressError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by AuthorizeSecurityGroupIngress +#[derive(Debug, PartialEq)] +pub enum AuthorizeSecurityGroupIngressError {} + +impl AuthorizeSecurityGroupIngressError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for AuthorizeSecurityGroupIngressError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for AuthorizeSecurityGroupIngressError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by BundleInstance +#[derive(Debug, PartialEq)] +pub enum BundleInstanceError {} + +impl BundleInstanceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for BundleInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for AttachVolumeError { +impl Error for BundleInstanceError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by AttachVpnGateway +/// Errors returned by CancelBundleTask #[derive(Debug, PartialEq)] -pub enum AttachVpnGatewayError {} +pub enum CancelBundleTaskError {} -impl AttachVpnGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CancelBundleTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50670,22 +54550,22 @@ impl AttachVpnGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for AttachVpnGatewayError { +impl fmt::Display for CancelBundleTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for AttachVpnGatewayError { +impl Error for CancelBundleTaskError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by AuthorizeClientVpnIngress +/// Errors returned by CancelCapacityReservation #[derive(Debug, PartialEq)] -pub enum AuthorizeClientVpnIngressError {} +pub enum CancelCapacityReservationError {} -impl AuthorizeClientVpnIngressError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CancelCapacityReservationError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50708,24 +54588,22 @@ impl AuthorizeClientVpnIngressError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for AuthorizeClientVpnIngressError { +impl fmt::Display for CancelCapacityReservationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for AuthorizeClientVpnIngressError { +impl Error for CancelCapacityReservationError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by AuthorizeSecurityGroupEgress +/// Errors returned by CancelConversionTask #[derive(Debug, PartialEq)] -pub enum AuthorizeSecurityGroupEgressError {} +pub enum CancelConversionTaskError {} -impl AuthorizeSecurityGroupEgressError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CancelConversionTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50748,24 +54626,22 @@ impl AuthorizeSecurityGroupEgressError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for AuthorizeSecurityGroupEgressError { +impl fmt::Display for CancelConversionTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for AuthorizeSecurityGroupEgressError { +impl Error for CancelConversionTaskError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by AuthorizeSecurityGroupIngress +/// Errors 
returned by CancelExportTask #[derive(Debug, PartialEq)] -pub enum AuthorizeSecurityGroupIngressError {} +pub enum CancelExportTaskError {} -impl AuthorizeSecurityGroupIngressError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CancelExportTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50788,22 +54664,22 @@ impl AuthorizeSecurityGroupIngressError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for AuthorizeSecurityGroupIngressError { +impl fmt::Display for CancelExportTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for AuthorizeSecurityGroupIngressError { +impl Error for CancelExportTaskError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by BundleInstance +/// Errors returned by CancelImportTask #[derive(Debug, PartialEq)] -pub enum BundleInstanceError {} +pub enum CancelImportTaskError {} -impl BundleInstanceError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CancelImportTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50826,22 +54702,24 @@ impl BundleInstanceError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for BundleInstanceError { +impl fmt::Display for CancelImportTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for BundleInstanceError { +impl Error for CancelImportTaskError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelBundleTask +/// Errors returned by CancelReservedInstancesListing #[derive(Debug, PartialEq)] -pub enum CancelBundleTaskError {} +pub enum CancelReservedInstancesListingError {} -impl CancelBundleTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CancelReservedInstancesListingError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50864,22 +54742,24 @@ impl CancelBundleTaskError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelBundleTaskError { +impl fmt::Display for CancelReservedInstancesListingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelBundleTaskError { +impl Error for CancelReservedInstancesListingError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelCapacityReservation +/// Errors returned by CancelSpotFleetRequests #[derive(Debug, PartialEq)] -pub enum CancelCapacityReservationError {} +pub enum EC2CancelSpotFleetRequestsError {} -impl CancelCapacityReservationError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl EC2CancelSpotFleetRequestsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50902,22 +54782,24 @@ impl CancelCapacityReservationError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for 
CancelCapacityReservationError { +impl fmt::Display for EC2CancelSpotFleetRequestsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelCapacityReservationError { +impl Error for EC2CancelSpotFleetRequestsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelConversionTask +/// Errors returned by CancelSpotInstanceRequests #[derive(Debug, PartialEq)] -pub enum CancelConversionTaskError {} +pub enum CancelSpotInstanceRequestsError {} -impl CancelConversionTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CancelSpotInstanceRequestsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50940,22 +54822,22 @@ impl CancelConversionTaskError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelConversionTaskError { +impl fmt::Display for CancelSpotInstanceRequestsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelConversionTaskError { +impl Error for CancelSpotInstanceRequestsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelExportTask +/// Errors returned by ConfirmProductInstance #[derive(Debug, PartialEq)] -pub enum CancelExportTaskError {} +pub enum ConfirmProductInstanceError {} -impl CancelExportTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ConfirmProductInstanceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -50978,22 +54860,22 @@ impl CancelExportTaskError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelExportTaskError { +impl fmt::Display for ConfirmProductInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelExportTaskError { +impl Error for ConfirmProductInstanceError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelImportTask +/// Errors returned by CopyFpgaImage #[derive(Debug, PartialEq)] -pub enum CancelImportTaskError {} +pub enum CopyFpgaImageError {} -impl CancelImportTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CopyFpgaImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51016,24 +54898,22 @@ impl CancelImportTaskError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelImportTaskError { +impl fmt::Display for CopyFpgaImageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelImportTaskError { +impl Error for CopyFpgaImageError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelReservedInstancesListing +/// Errors returned by CopyImage #[derive(Debug, PartialEq)] -pub enum CancelReservedInstancesListingError {} +pub enum CopyImageError {} -impl CancelReservedInstancesListingError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CopyImageError { + pub fn from_response(res: 
BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51056,24 +54936,22 @@ impl CancelReservedInstancesListingError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelReservedInstancesListingError { +impl fmt::Display for CopyImageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelReservedInstancesListingError { +impl Error for CopyImageError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelSpotFleetRequests +/// Errors returned by CopySnapshot #[derive(Debug, PartialEq)] -pub enum EC2CancelSpotFleetRequestsError {} +pub enum CopySnapshotError {} -impl EC2CancelSpotFleetRequestsError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CopySnapshotError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51096,24 +54974,22 @@ impl EC2CancelSpotFleetRequestsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for EC2CancelSpotFleetRequestsError { +impl fmt::Display for CopySnapshotError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for EC2CancelSpotFleetRequestsError { +impl Error for CopySnapshotError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CancelSpotInstanceRequests +/// Errors returned by CreateCapacityReservation #[derive(Debug, PartialEq)] -pub enum CancelSpotInstanceRequestsError {} +pub enum CreateCapacityReservationError {} -impl CancelSpotInstanceRequestsError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CreateCapacityReservationError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51136,22 +55012,22 @@ impl CancelSpotInstanceRequestsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CancelSpotInstanceRequestsError { +impl fmt::Display for CreateCapacityReservationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CancelSpotInstanceRequestsError { +impl Error for CreateCapacityReservationError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ConfirmProductInstance +/// Errors returned by CreateClientVpnEndpoint #[derive(Debug, PartialEq)] -pub enum ConfirmProductInstanceError {} +pub enum CreateClientVpnEndpointError {} -impl ConfirmProductInstanceError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateClientVpnEndpointError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51174,22 +55050,22 @@ impl ConfirmProductInstanceError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ConfirmProductInstanceError { +impl fmt::Display for CreateClientVpnEndpointError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ConfirmProductInstanceError { +impl Error for CreateClientVpnEndpointError { fn 
description(&self) -> &str { match *self {} } } -/// Errors returned by CopyFpgaImage +/// Errors returned by CreateClientVpnRoute #[derive(Debug, PartialEq)] -pub enum CopyFpgaImageError {} +pub enum CreateClientVpnRouteError {} -impl CopyFpgaImageError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateClientVpnRouteError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51212,22 +55088,22 @@ impl CopyFpgaImageError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CopyFpgaImageError { +impl fmt::Display for CreateClientVpnRouteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CopyFpgaImageError { +impl Error for CreateClientVpnRouteError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CopyImage +/// Errors returned by CreateCustomerGateway #[derive(Debug, PartialEq)] -pub enum CopyImageError {} +pub enum CreateCustomerGatewayError {} -impl CopyImageError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateCustomerGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51250,22 +55126,22 @@ impl CopyImageError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CopyImageError { +impl fmt::Display for CreateCustomerGatewayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CopyImageError { +impl Error for CreateCustomerGatewayError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CopySnapshot +/// Errors returned by CreateDefaultSubnet #[derive(Debug, PartialEq)] -pub enum CopySnapshotError {} +pub enum CreateDefaultSubnetError {} -impl CopySnapshotError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateDefaultSubnetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51288,22 +55164,22 @@ impl CopySnapshotError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CopySnapshotError { +impl fmt::Display for CreateDefaultSubnetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CopySnapshotError { +impl Error for CreateDefaultSubnetError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateCapacityReservation +/// Errors returned by CreateDefaultVpc #[derive(Debug, PartialEq)] -pub enum CreateCapacityReservationError {} +pub enum CreateDefaultVpcError {} -impl CreateCapacityReservationError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateDefaultVpcError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51326,22 +55202,22 @@ impl CreateCapacityReservationError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateCapacityReservationError { +impl fmt::Display for CreateDefaultVpcError { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateCapacityReservationError { +impl Error for CreateDefaultVpcError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateClientVpnEndpoint +/// Errors returned by CreateDhcpOptions #[derive(Debug, PartialEq)] -pub enum CreateClientVpnEndpointError {} +pub enum CreateDhcpOptionsError {} -impl CreateClientVpnEndpointError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateDhcpOptionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51364,22 +55240,24 @@ impl CreateClientVpnEndpointError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateClientVpnEndpointError { +impl fmt::Display for CreateDhcpOptionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateClientVpnEndpointError { +impl Error for CreateDhcpOptionsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateClientVpnRoute +/// Errors returned by CreateEgressOnlyInternetGateway #[derive(Debug, PartialEq)] -pub enum CreateClientVpnRouteError {} +pub enum CreateEgressOnlyInternetGatewayError {} -impl CreateClientVpnRouteError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateEgressOnlyInternetGatewayError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51402,22 +55280,22 @@ impl CreateClientVpnRouteError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateClientVpnRouteError { +impl fmt::Display for CreateEgressOnlyInternetGatewayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateClientVpnRouteError { +impl Error for CreateEgressOnlyInternetGatewayError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateCustomerGateway +/// Errors returned by CreateFleet #[derive(Debug, PartialEq)] -pub enum CreateCustomerGatewayError {} +pub enum CreateFleetError {} -impl CreateCustomerGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateFleetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51440,22 +55318,22 @@ impl CreateCustomerGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateCustomerGatewayError { +impl fmt::Display for CreateFleetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateCustomerGatewayError { +impl Error for CreateFleetError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateDefaultSubnet +/// Errors returned by CreateFlowLogs #[derive(Debug, PartialEq)] -pub enum CreateDefaultSubnetError {} +pub enum CreateFlowLogsError {} -impl CreateDefaultSubnetError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateFlowLogsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = 
EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51478,22 +55356,22 @@ impl CreateDefaultSubnetError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateDefaultSubnetError { +impl fmt::Display for CreateFlowLogsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateDefaultSubnetError { +impl Error for CreateFlowLogsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateDefaultVpc +/// Errors returned by CreateFpgaImage #[derive(Debug, PartialEq)] -pub enum CreateDefaultVpcError {} +pub enum CreateFpgaImageError {} -impl CreateDefaultVpcError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateFpgaImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51516,22 +55394,22 @@ impl CreateDefaultVpcError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateDefaultVpcError { +impl fmt::Display for CreateFpgaImageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateDefaultVpcError { +impl Error for CreateFpgaImageError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateDhcpOptions +/// Errors returned by CreateImage #[derive(Debug, PartialEq)] -pub enum CreateDhcpOptionsError {} +pub enum CreateImageError {} -impl CreateDhcpOptionsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51554,24 +55432,22 @@ impl CreateDhcpOptionsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateDhcpOptionsError { +impl fmt::Display for CreateImageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateDhcpOptionsError { +impl Error for CreateImageError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateEgressOnlyInternetGateway +/// Errors returned by CreateInstanceExportTask #[derive(Debug, PartialEq)] -pub enum CreateEgressOnlyInternetGatewayError {} +pub enum CreateInstanceExportTaskError {} -impl CreateEgressOnlyInternetGatewayError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CreateInstanceExportTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51594,22 +55470,22 @@ impl CreateEgressOnlyInternetGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateEgressOnlyInternetGatewayError { +impl fmt::Display for CreateInstanceExportTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateEgressOnlyInternetGatewayError { +impl Error for CreateInstanceExportTaskError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateFleet +/// Errors returned by CreateInternetGateway #[derive(Debug, PartialEq)] -pub enum CreateFleetError {} +pub 
enum CreateInternetGatewayError {} -impl CreateFleetError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateInternetGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51632,22 +55508,22 @@ impl CreateFleetError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateFleetError { +impl fmt::Display for CreateInternetGatewayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateFleetError { +impl Error for CreateInternetGatewayError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateFlowLogs +/// Errors returned by CreateKeyPair #[derive(Debug, PartialEq)] -pub enum CreateFlowLogsError {} +pub enum CreateKeyPairError {} -impl CreateFlowLogsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateKeyPairError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51670,22 +55546,22 @@ impl CreateFlowLogsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateFlowLogsError { +impl fmt::Display for CreateKeyPairError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateFlowLogsError { +impl Error for CreateKeyPairError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateFpgaImage +/// Errors returned by CreateLaunchTemplate #[derive(Debug, PartialEq)] -pub enum CreateFpgaImageError {} +pub enum CreateLaunchTemplateError {} -impl CreateFpgaImageError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateLaunchTemplateError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51708,22 +55584,24 @@ impl CreateFpgaImageError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateFpgaImageError { +impl fmt::Display for CreateLaunchTemplateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateFpgaImageError { +impl Error for CreateLaunchTemplateError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateImage +/// Errors returned by CreateLaunchTemplateVersion #[derive(Debug, PartialEq)] -pub enum CreateImageError {} +pub enum CreateLaunchTemplateVersionError {} -impl CreateImageError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateLaunchTemplateVersionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51746,22 +55624,22 @@ impl CreateImageError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateImageError { +impl fmt::Display for CreateLaunchTemplateVersionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateImageError { +impl Error for CreateLaunchTemplateVersionError { fn description(&self) -> &str { match *self {} } } -/// 
Errors returned by CreateInstanceExportTask +/// Errors returned by CreateNatGateway #[derive(Debug, PartialEq)] -pub enum CreateInstanceExportTaskError {} +pub enum CreateNatGatewayError {} -impl CreateInstanceExportTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateNatGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51784,22 +55662,22 @@ impl CreateInstanceExportTaskError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateInstanceExportTaskError { +impl fmt::Display for CreateNatGatewayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateInstanceExportTaskError { +impl Error for CreateNatGatewayError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateInternetGateway +/// Errors returned by CreateNetworkAcl #[derive(Debug, PartialEq)] -pub enum CreateInternetGatewayError {} +pub enum CreateNetworkAclError {} -impl CreateInternetGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateNetworkAclError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51822,22 +55700,22 @@ impl CreateInternetGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateInternetGatewayError { +impl fmt::Display for CreateNetworkAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateInternetGatewayError { +impl Error for CreateNetworkAclError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateKeyPair +/// Errors returned by CreateNetworkAclEntry #[derive(Debug, PartialEq)] -pub enum CreateKeyPairError {} +pub enum CreateNetworkAclEntryError {} -impl CreateKeyPairError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateNetworkAclEntryError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51860,22 +55738,22 @@ impl CreateKeyPairError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateKeyPairError { +impl fmt::Display for CreateNetworkAclEntryError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateKeyPairError { +impl Error for CreateNetworkAclEntryError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateLaunchTemplate +/// Errors returned by CreateNetworkInterface #[derive(Debug, PartialEq)] -pub enum CreateLaunchTemplateError {} +pub enum CreateNetworkInterfaceError {} -impl CreateLaunchTemplateError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateNetworkInterfaceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51898,62 +55776,24 @@ impl CreateLaunchTemplateError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateLaunchTemplateError { +impl fmt::Display for 
CreateNetworkInterfaceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateLaunchTemplateError { +impl Error for CreateNetworkInterfaceError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateLaunchTemplateVersion +/// Errors returned by CreateNetworkInterfacePermission #[derive(Debug, PartialEq)] -pub enum CreateLaunchTemplateVersionError {} +pub enum CreateNetworkInterfacePermissionError {} -impl CreateLaunchTemplateVersionError { +impl CreateNetworkInterfacePermissionError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { - { - let reader = EventReader::new(res.body.as_ref()); - let mut stack = XmlResponse::new(reader.into_iter().peekable()); - find_start_element(&mut stack); - if let Ok(parsed_error) = Self::deserialize(&mut stack) { - match &parsed_error.code[..] { - _ => {} - } - } - } - RusotoError::Unknown(res) - } - - fn deserialize(stack: &mut T) -> Result - where - T: Peek + Next, - { - start_element("Response", stack)?; - start_element("Errors", stack)?; - XmlErrorDeserializer::deserialize("Error", stack) - } -} -impl fmt::Display for CreateLaunchTemplateVersionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} -impl Error for CreateLaunchTemplateVersionError { - fn description(&self) -> &str { - match *self {} - } -} -/// Errors returned by CreateNatGateway -#[derive(Debug, PartialEq)] -pub enum CreateNatGatewayError {} - -impl CreateNatGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -51976,22 +55816,22 @@ impl CreateNatGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateNatGatewayError { +impl fmt::Display for CreateNetworkInterfacePermissionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateNatGatewayError { +impl Error for CreateNetworkInterfacePermissionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateNetworkAcl +/// Errors returned by CreatePlacementGroup #[derive(Debug, PartialEq)] -pub enum CreateNetworkAclError {} +pub enum CreatePlacementGroupError {} -impl CreateNetworkAclError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreatePlacementGroupError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52014,22 +55854,24 @@ impl CreateNetworkAclError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateNetworkAclError { +impl fmt::Display for CreatePlacementGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateNetworkAclError { +impl Error for CreatePlacementGroupError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateNetworkAclEntry +/// Errors returned by CreateReservedInstancesListing #[derive(Debug, PartialEq)] -pub enum CreateNetworkAclEntryError {} +pub enum CreateReservedInstancesListingError {} -impl CreateNetworkAclEntryError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateReservedInstancesListingError { + pub fn 
from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52052,22 +55894,22 @@ impl CreateNetworkAclEntryError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateNetworkAclEntryError { +impl fmt::Display for CreateReservedInstancesListingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateNetworkAclEntryError { +impl Error for CreateReservedInstancesListingError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateNetworkInterface +/// Errors returned by CreateRoute #[derive(Debug, PartialEq)] -pub enum CreateNetworkInterfaceError {} +pub enum CreateRouteError {} -impl CreateNetworkInterfaceError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateRouteError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52090,24 +55932,22 @@ impl CreateNetworkInterfaceError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateNetworkInterfaceError { +impl fmt::Display for CreateRouteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateNetworkInterfaceError { +impl Error for CreateRouteError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateNetworkInterfacePermission +/// Errors returned by CreateRouteTable #[derive(Debug, PartialEq)] -pub enum CreateNetworkInterfacePermissionError {} +pub enum CreateRouteTableError {} -impl CreateNetworkInterfacePermissionError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CreateRouteTableError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52130,22 +55970,22 @@ impl CreateNetworkInterfacePermissionError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateNetworkInterfacePermissionError { +impl fmt::Display for CreateRouteTableError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateNetworkInterfacePermissionError { +impl Error for CreateRouteTableError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreatePlacementGroup +/// Errors returned by CreateSecurityGroup #[derive(Debug, PartialEq)] -pub enum CreatePlacementGroupError {} +pub enum CreateSecurityGroupError {} -impl CreatePlacementGroupError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateSecurityGroupError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52168,24 +56008,22 @@ impl CreatePlacementGroupError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreatePlacementGroupError { +impl fmt::Display for CreateSecurityGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreatePlacementGroupError { +impl Error for CreateSecurityGroupError { fn description(&self) -> &str { match 
*self {} } } -/// Errors returned by CreateReservedInstancesListing +/// Errors returned by CreateSnapshot #[derive(Debug, PartialEq)] -pub enum CreateReservedInstancesListingError {} +pub enum CreateSnapshotError {} -impl CreateReservedInstancesListingError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl CreateSnapshotError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52208,22 +56046,22 @@ impl CreateReservedInstancesListingError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateReservedInstancesListingError { +impl fmt::Display for CreateSnapshotError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateReservedInstancesListingError { +impl Error for CreateSnapshotError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateRoute +/// Errors returned by CreateSnapshots #[derive(Debug, PartialEq)] -pub enum CreateRouteError {} +pub enum CreateSnapshotsError {} -impl CreateRouteError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateSnapshotsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52246,22 +56084,24 @@ impl CreateRouteError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateRouteError { +impl fmt::Display for CreateSnapshotsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateRouteError { +impl Error for CreateSnapshotsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateRouteTable +/// Errors returned by CreateSpotDatafeedSubscription #[derive(Debug, PartialEq)] -pub enum CreateRouteTableError {} +pub enum CreateSpotDatafeedSubscriptionError {} -impl CreateRouteTableError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateSpotDatafeedSubscriptionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52284,22 +56124,22 @@ impl CreateRouteTableError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateRouteTableError { +impl fmt::Display for CreateSpotDatafeedSubscriptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateRouteTableError { +impl Error for CreateSpotDatafeedSubscriptionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateSecurityGroup +/// Errors returned by CreateSubnet #[derive(Debug, PartialEq)] -pub enum CreateSecurityGroupError {} +pub enum CreateSubnetError {} -impl CreateSecurityGroupError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateSubnetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52322,22 +56162,22 @@ impl CreateSecurityGroupError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateSecurityGroupError { +impl 
fmt::Display for CreateSubnetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateSecurityGroupError { +impl Error for CreateSubnetError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateSnapshot +/// Errors returned by CreateTags #[derive(Debug, PartialEq)] -pub enum CreateSnapshotError {} +pub enum CreateTagsError {} -impl CreateSnapshotError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateTagsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52360,22 +56200,22 @@ impl CreateSnapshotError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateSnapshotError { +impl fmt::Display for CreateTagsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateSnapshotError { +impl Error for CreateTagsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateSnapshots +/// Errors returned by CreateTrafficMirrorFilter #[derive(Debug, PartialEq)] -pub enum CreateSnapshotsError {} +pub enum CreateTrafficMirrorFilterError {} -impl CreateSnapshotsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateTrafficMirrorFilterError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52398,24 +56238,24 @@ impl CreateSnapshotsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateSnapshotsError { +impl fmt::Display for CreateTrafficMirrorFilterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateSnapshotsError { +impl Error for CreateTrafficMirrorFilterError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateSpotDatafeedSubscription +/// Errors returned by CreateTrafficMirrorFilterRule #[derive(Debug, PartialEq)] -pub enum CreateSpotDatafeedSubscriptionError {} +pub enum CreateTrafficMirrorFilterRuleError {} -impl CreateSpotDatafeedSubscriptionError { +impl CreateTrafficMirrorFilterRuleError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52438,22 +56278,24 @@ impl CreateSpotDatafeedSubscriptionError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateSpotDatafeedSubscriptionError { +impl fmt::Display for CreateTrafficMirrorFilterRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateSpotDatafeedSubscriptionError { +impl Error for CreateTrafficMirrorFilterRuleError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateSubnet +/// Errors returned by CreateTrafficMirrorSession #[derive(Debug, PartialEq)] -pub enum CreateSubnetError {} +pub enum CreateTrafficMirrorSessionError {} -impl CreateSubnetError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateTrafficMirrorSessionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = 
EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52476,22 +56318,22 @@ impl CreateSubnetError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateSubnetError { +impl fmt::Display for CreateTrafficMirrorSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateSubnetError { +impl Error for CreateTrafficMirrorSessionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by CreateTags +/// Errors returned by CreateTrafficMirrorTarget #[derive(Debug, PartialEq)] -pub enum CreateTagsError {} +pub enum CreateTrafficMirrorTargetError {} -impl CreateTagsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateTrafficMirrorTargetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -52514,12 +56356,12 @@ impl CreateTagsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for CreateTagsError { +impl fmt::Display for CreateTrafficMirrorTargetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for CreateTagsError { +impl Error for CreateTrafficMirrorTargetError { fn description(&self) -> &str { match *self {} } @@ -53248,22 +57090,174 @@ impl DeleteFleetsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteFleetsError { +impl fmt::Display for DeleteFleetsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteFleetsError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DeleteFlowLogs +#[derive(Debug, PartialEq)] +pub enum DeleteFlowLogsError {} + +impl DeleteFlowLogsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DeleteFlowLogsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteFlowLogsError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DeleteFpgaImage +#[derive(Debug, PartialEq)] +pub enum DeleteFpgaImageError {} + +impl DeleteFpgaImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DeleteFpgaImageError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteFpgaImageError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DeleteInternetGateway +#[derive(Debug, PartialEq)] +pub enum DeleteInternetGatewayError {} + +impl DeleteInternetGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DeleteInternetGatewayError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteInternetGatewayError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DeleteKeyPair +#[derive(Debug, PartialEq)] +pub enum DeleteKeyPairError {} + +impl DeleteKeyPairError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DeleteKeyPairError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteFleetsError { +impl Error for DeleteKeyPairError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteFlowLogs +/// Errors returned by DeleteLaunchTemplate #[derive(Debug, PartialEq)] -pub enum DeleteFlowLogsError {} +pub enum DeleteLaunchTemplateError {} -impl DeleteFlowLogsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteLaunchTemplateError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53286,22 +57280,24 @@ impl DeleteFlowLogsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteFlowLogsError { +impl fmt::Display for DeleteLaunchTemplateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteFlowLogsError { +impl Error for DeleteLaunchTemplateError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteFpgaImage +/// Errors returned by DeleteLaunchTemplateVersions #[derive(Debug, PartialEq)] -pub enum DeleteFpgaImageError {} +pub enum DeleteLaunchTemplateVersionsError {} -impl DeleteFpgaImageError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteLaunchTemplateVersionsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53324,22 +57320,22 @@ impl DeleteFpgaImageError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteFpgaImageError { +impl fmt::Display for DeleteLaunchTemplateVersionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteFpgaImageError { +impl Error for DeleteLaunchTemplateVersionsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteInternetGateway +/// Errors returned by DeleteNatGateway #[derive(Debug, PartialEq)] -pub enum DeleteInternetGatewayError {} +pub enum DeleteNatGatewayError {} -impl DeleteInternetGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteNatGatewayError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53362,22 +57358,22 @@ impl DeleteInternetGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteInternetGatewayError { +impl fmt::Display for DeleteNatGatewayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteInternetGatewayError { +impl Error for DeleteNatGatewayError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteKeyPair +/// Errors returned by DeleteNetworkAcl #[derive(Debug, PartialEq)] -pub enum DeleteKeyPairError {} +pub enum DeleteNetworkAclError 
{} -impl DeleteKeyPairError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteNetworkAclError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53400,22 +57396,22 @@ impl DeleteKeyPairError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteKeyPairError { +impl fmt::Display for DeleteNetworkAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteKeyPairError { +impl Error for DeleteNetworkAclError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteLaunchTemplate +/// Errors returned by DeleteNetworkAclEntry #[derive(Debug, PartialEq)] -pub enum DeleteLaunchTemplateError {} +pub enum DeleteNetworkAclEntryError {} -impl DeleteLaunchTemplateError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteNetworkAclEntryError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53438,24 +57434,22 @@ impl DeleteLaunchTemplateError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteLaunchTemplateError { +impl fmt::Display for DeleteNetworkAclEntryError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteLaunchTemplateError { +impl Error for DeleteNetworkAclEntryError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteLaunchTemplateVersions +/// Errors returned by DeleteNetworkInterface #[derive(Debug, PartialEq)] -pub enum DeleteLaunchTemplateVersionsError {} +pub enum DeleteNetworkInterfaceError {} -impl DeleteLaunchTemplateVersionsError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl DeleteNetworkInterfaceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53478,22 +57472,24 @@ impl DeleteLaunchTemplateVersionsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteLaunchTemplateVersionsError { +impl fmt::Display for DeleteNetworkInterfaceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteLaunchTemplateVersionsError { +impl Error for DeleteNetworkInterfaceError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteNatGateway +/// Errors returned by DeleteNetworkInterfacePermission #[derive(Debug, PartialEq)] -pub enum DeleteNatGatewayError {} +pub enum DeleteNetworkInterfacePermissionError {} -impl DeleteNatGatewayError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteNetworkInterfacePermissionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53516,22 +57512,22 @@ impl DeleteNatGatewayError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteNatGatewayError { +impl fmt::Display for DeleteNetworkInterfacePermissionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
write!(f, "{}", self.description()) } } -impl Error for DeleteNatGatewayError { +impl Error for DeleteNetworkInterfacePermissionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteNetworkAcl +/// Errors returned by DeletePlacementGroup #[derive(Debug, PartialEq)] -pub enum DeleteNetworkAclError {} +pub enum DeletePlacementGroupError {} -impl DeleteNetworkAclError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeletePlacementGroupError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53554,22 +57550,22 @@ impl DeleteNetworkAclError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteNetworkAclError { +impl fmt::Display for DeletePlacementGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteNetworkAclError { +impl Error for DeletePlacementGroupError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteNetworkAclEntry +/// Errors returned by DeleteRoute #[derive(Debug, PartialEq)] -pub enum DeleteNetworkAclEntryError {} +pub enum DeleteRouteError {} -impl DeleteNetworkAclEntryError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteRouteError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53592,22 +57588,22 @@ impl DeleteNetworkAclEntryError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteNetworkAclEntryError { +impl fmt::Display for DeleteRouteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteNetworkAclEntryError { +impl Error for DeleteRouteError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteNetworkInterface +/// Errors returned by DeleteRouteTable #[derive(Debug, PartialEq)] -pub enum DeleteNetworkInterfaceError {} +pub enum DeleteRouteTableError {} -impl DeleteNetworkInterfaceError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteRouteTableError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53630,24 +57626,22 @@ impl DeleteNetworkInterfaceError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteNetworkInterfaceError { +impl fmt::Display for DeleteRouteTableError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteNetworkInterfaceError { +impl Error for DeleteRouteTableError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteNetworkInterfacePermission +/// Errors returned by DeleteSecurityGroup #[derive(Debug, PartialEq)] -pub enum DeleteNetworkInterfacePermissionError {} +pub enum DeleteSecurityGroupError {} -impl DeleteNetworkInterfacePermissionError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl DeleteSecurityGroupError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); @@ -53670,22 +57664,22 @@ impl DeleteNetworkInterfacePermissionError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteNetworkInterfacePermissionError { +impl fmt::Display for DeleteSecurityGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteNetworkInterfacePermissionError { +impl Error for DeleteSecurityGroupError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeletePlacementGroup +/// Errors returned by DeleteSnapshot #[derive(Debug, PartialEq)] -pub enum DeletePlacementGroupError {} +pub enum DeleteSnapshotError {} -impl DeletePlacementGroupError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteSnapshotError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53708,22 +57702,24 @@ impl DeletePlacementGroupError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeletePlacementGroupError { +impl fmt::Display for DeleteSnapshotError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeletePlacementGroupError { +impl Error for DeleteSnapshotError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteRoute +/// Errors returned by DeleteSpotDatafeedSubscription #[derive(Debug, PartialEq)] -pub enum DeleteRouteError {} +pub enum DeleteSpotDatafeedSubscriptionError {} -impl DeleteRouteError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteSpotDatafeedSubscriptionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53746,22 +57742,22 @@ impl DeleteRouteError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteRouteError { +impl fmt::Display for DeleteSpotDatafeedSubscriptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteRouteError { +impl Error for DeleteSpotDatafeedSubscriptionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteRouteTable +/// Errors returned by DeleteSubnet #[derive(Debug, PartialEq)] -pub enum DeleteRouteTableError {} +pub enum DeleteSubnetError {} -impl DeleteRouteTableError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteSubnetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53784,22 +57780,22 @@ impl DeleteRouteTableError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteRouteTableError { +impl fmt::Display for DeleteSubnetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteRouteTableError { +impl Error for DeleteSubnetError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteSecurityGroup +/// Errors returned by DeleteTags #[derive(Debug, PartialEq)] -pub enum DeleteSecurityGroupError {} +pub enum DeleteTagsError {} -impl DeleteSecurityGroupError { - pub fn 
from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteTagsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53822,22 +57818,22 @@ impl DeleteSecurityGroupError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteSecurityGroupError { +impl fmt::Display for DeleteTagsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteSecurityGroupError { +impl Error for DeleteTagsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteSnapshot +/// Errors returned by DeleteTrafficMirrorFilter #[derive(Debug, PartialEq)] -pub enum DeleteSnapshotError {} +pub enum DeleteTrafficMirrorFilterError {} -impl DeleteSnapshotError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteTrafficMirrorFilterError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53860,24 +57856,24 @@ impl DeleteSnapshotError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteSnapshotError { +impl fmt::Display for DeleteTrafficMirrorFilterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteSnapshotError { +impl Error for DeleteTrafficMirrorFilterError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteSpotDatafeedSubscription +/// Errors returned by DeleteTrafficMirrorFilterRule #[derive(Debug, PartialEq)] -pub enum DeleteSpotDatafeedSubscriptionError {} +pub enum DeleteTrafficMirrorFilterRuleError {} -impl DeleteSpotDatafeedSubscriptionError { +impl DeleteTrafficMirrorFilterRuleError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53900,22 +57896,24 @@ impl DeleteSpotDatafeedSubscriptionError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteSpotDatafeedSubscriptionError { +impl fmt::Display for DeleteTrafficMirrorFilterRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteSpotDatafeedSubscriptionError { +impl Error for DeleteTrafficMirrorFilterRuleError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteSubnet +/// Errors returned by DeleteTrafficMirrorSession #[derive(Debug, PartialEq)] -pub enum DeleteSubnetError {} +pub enum DeleteTrafficMirrorSessionError {} -impl DeleteSubnetError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteTrafficMirrorSessionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53938,22 +57936,22 @@ impl DeleteSubnetError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteSubnetError { +impl fmt::Display for DeleteTrafficMirrorSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteSubnetError { +impl Error for 
DeleteTrafficMirrorSessionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DeleteTags +/// Errors returned by DeleteTrafficMirrorTarget #[derive(Debug, PartialEq)] -pub enum DeleteTagsError {} +pub enum DeleteTrafficMirrorTargetError {} -impl DeleteTagsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteTrafficMirrorTargetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -53976,12 +57974,12 @@ impl DeleteTagsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DeleteTagsError { +impl fmt::Display for DeleteTrafficMirrorTargetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteTagsError { +impl Error for DeleteTrafficMirrorTargetError { fn description(&self) -> &str { match *self {} } @@ -55138,22 +59136,62 @@ impl DescribeCustomerGatewaysError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DescribeCustomerGatewaysError { +impl fmt::Display for DescribeCustomerGatewaysError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeCustomerGatewaysError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DescribeDhcpOptions +#[derive(Debug, PartialEq)] +pub enum DescribeDhcpOptionsError {} + +impl DescribeDhcpOptionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DescribeDhcpOptionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeCustomerGatewaysError { +impl Error for DescribeDhcpOptionsError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DescribeDhcpOptions +/// Errors returned by DescribeEgressOnlyInternetGateways #[derive(Debug, PartialEq)] -pub enum DescribeDhcpOptionsError {} +pub enum DescribeEgressOnlyInternetGatewaysError {} -impl DescribeDhcpOptionsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeEgressOnlyInternetGatewaysError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -55176,24 +59214,22 @@ impl DescribeDhcpOptionsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DescribeDhcpOptionsError { +impl fmt::Display for DescribeEgressOnlyInternetGatewaysError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeDhcpOptionsError { +impl Error for DescribeEgressOnlyInternetGatewaysError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DescribeEgressOnlyInternetGateways +/// Errors returned by DescribeElasticGpus #[derive(Debug, PartialEq)] -pub enum DescribeEgressOnlyInternetGatewaysError {} +pub enum DescribeElasticGpusError {} -impl DescribeEgressOnlyInternetGatewaysError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl DescribeElasticGpusError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -55216,22 +59252,22 @@ impl DescribeEgressOnlyInternetGatewaysError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DescribeEgressOnlyInternetGatewaysError { +impl fmt::Display for DescribeElasticGpusError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeEgressOnlyInternetGatewaysError { +impl Error for DescribeElasticGpusError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by DescribeElasticGpus +/// Errors returned by DescribeExportImageTasks #[derive(Debug, PartialEq)] -pub enum DescribeElasticGpusError {} +pub enum DescribeExportImageTasksError {} -impl DescribeElasticGpusError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeExportImageTasksError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -55254,12 +59290,12 @@ impl DescribeElasticGpusError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for DescribeElasticGpusError { +impl fmt::Display for DescribeExportImageTasksError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeElasticGpusError { +impl Error for 
DescribeExportImageTasksError { fn description(&self) -> &str { match *self {} } @@ -57430,6 +61466,126 @@ impl Error for DescribeTagsError { match *self {} } } +/// Errors returned by DescribeTrafficMirrorFilters +#[derive(Debug, PartialEq)] +pub enum DescribeTrafficMirrorFiltersError {} + +impl DescribeTrafficMirrorFiltersError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DescribeTrafficMirrorFiltersError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeTrafficMirrorFiltersError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DescribeTrafficMirrorSessions +#[derive(Debug, PartialEq)] +pub enum DescribeTrafficMirrorSessionsError {} + +impl DescribeTrafficMirrorSessionsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DescribeTrafficMirrorSessionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeTrafficMirrorSessionsError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by DescribeTrafficMirrorTargets +#[derive(Debug, PartialEq)] +pub enum DescribeTrafficMirrorTargetsError {} + +impl DescribeTrafficMirrorTargetsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for DescribeTrafficMirrorTargetsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeTrafficMirrorTargetsError { + fn description(&self) -> &str { + match *self {} + } +} /// Errors returned by DescribeTransitGatewayAttachments #[derive(Debug, PartialEq)] pub enum DescribeTransitGatewayAttachmentsError {} @@ -59058,62 +63214,142 @@ impl EnableVolumeIOError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for EnableVolumeIOError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} -impl Error for EnableVolumeIOError { - fn description(&self) -> &str { - match *self {} - } -} -/// Errors returned by EnableVpcClassicLink -#[derive(Debug, PartialEq)] -pub enum EnableVpcClassicLinkError {} - -impl EnableVpcClassicLinkError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { - { - let reader = EventReader::new(res.body.as_ref()); - let mut stack = XmlResponse::new(reader.into_iter().peekable()); - find_start_element(&mut stack); - if let Ok(parsed_error) = Self::deserialize(&mut stack) { - match &parsed_error.code[..] { - _ => {} - } - } - } - RusotoError::Unknown(res) - } - - fn deserialize(stack: &mut T) -> Result - where - T: Peek + Next, - { - start_element("Response", stack)?; - start_element("Errors", stack)?; - XmlErrorDeserializer::deserialize("Error", stack) - } -} -impl fmt::Display for EnableVpcClassicLinkError { +impl fmt::Display for EnableVolumeIOError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for EnableVolumeIOError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by EnableVpcClassicLink +#[derive(Debug, PartialEq)] +pub enum EnableVpcClassicLinkError {} + +impl EnableVpcClassicLinkError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for EnableVpcClassicLinkError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for EnableVpcClassicLinkError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by EnableVpcClassicLinkDnsSupport +#[derive(Debug, PartialEq)] +pub enum EnableVpcClassicLinkDnsSupportError {} + +impl EnableVpcClassicLinkDnsSupportError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for EnableVpcClassicLinkDnsSupportError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for EnableVpcClassicLinkDnsSupportError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by ExportClientVpnClientCertificateRevocationList +#[derive(Debug, PartialEq)] +pub enum ExportClientVpnClientCertificateRevocationListError {} + +impl ExportClientVpnClientCertificateRevocationListError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ExportClientVpnClientCertificateRevocationListError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for EnableVpcClassicLinkError { +impl Error for ExportClientVpnClientCertificateRevocationListError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by EnableVpcClassicLinkDnsSupport +/// Errors returned by ExportClientVpnClientConfiguration #[derive(Debug, PartialEq)] -pub enum EnableVpcClassicLinkDnsSupportError {} +pub enum ExportClientVpnClientConfigurationError {} -impl EnableVpcClassicLinkDnsSupportError { +impl ExportClientVpnClientConfigurationError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -59136,24 +63372,22 @@ impl EnableVpcClassicLinkDnsSupportError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for EnableVpcClassicLinkDnsSupportError { +impl fmt::Display for ExportClientVpnClientConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for EnableVpcClassicLinkDnsSupportError { +impl Error for ExportClientVpnClientConfigurationError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ExportClientVpnClientCertificateRevocationList +/// Errors returned by ExportImage #[derive(Debug, PartialEq)] -pub enum ExportClientVpnClientCertificateRevocationListError {} +pub enum ExportImageError {} -impl ExportClientVpnClientCertificateRevocationListError { - pub fn from_response( - res: BufferedHttpResponse, - ) -> RusotoError { +impl ExportImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -59176,24 +63410,24 @@ impl ExportClientVpnClientCertificateRevocationListError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ExportClientVpnClientCertificateRevocationListError { +impl fmt::Display for 
ExportImageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ExportClientVpnClientCertificateRevocationListError { +impl Error for ExportImageError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ExportClientVpnClientConfiguration +/// Errors returned by ExportTransitGatewayRoutes #[derive(Debug, PartialEq)] -pub enum ExportClientVpnClientConfigurationError {} +pub enum ExportTransitGatewayRoutesError {} -impl ExportClientVpnClientConfigurationError { +impl ExportTransitGatewayRoutesError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -59216,24 +63450,24 @@ impl ExportClientVpnClientConfigurationError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ExportClientVpnClientConfigurationError { +impl fmt::Display for ExportTransitGatewayRoutesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ExportClientVpnClientConfigurationError { +impl Error for ExportTransitGatewayRoutesError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ExportTransitGatewayRoutes +/// Errors returned by GetCapacityReservationUsage #[derive(Debug, PartialEq)] -pub enum ExportTransitGatewayRoutesError {} +pub enum GetCapacityReservationUsageError {} -impl ExportTransitGatewayRoutesError { +impl GetCapacityReservationUsageError { pub fn from_response( res: BufferedHttpResponse, - ) -> RusotoError { + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -59256,12 +63490,12 @@ impl ExportTransitGatewayRoutesError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ExportTransitGatewayRoutesError { +impl fmt::Display for GetCapacityReservationUsageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ExportTransitGatewayRoutesError { +impl Error for GetCapacityReservationUsageError { fn description(&self) -> &str { match *self {} } @@ -60692,6 +64926,126 @@ impl Error for ModifySubnetAttributeError { match *self {} } } +/// Errors returned by ModifyTrafficMirrorFilterNetworkServices +#[derive(Debug, PartialEq)] +pub enum ModifyTrafficMirrorFilterNetworkServicesError {} + +impl ModifyTrafficMirrorFilterNetworkServicesError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ModifyTrafficMirrorFilterNetworkServicesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyTrafficMirrorFilterNetworkServicesError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by ModifyTrafficMirrorFilterRule +#[derive(Debug, PartialEq)] +pub enum ModifyTrafficMirrorFilterRuleError {} + +impl ModifyTrafficMirrorFilterRuleError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ModifyTrafficMirrorFilterRuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyTrafficMirrorFilterRuleError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by ModifyTrafficMirrorSession +#[derive(Debug, PartialEq)] +pub enum ModifyTrafficMirrorSessionError {} + +impl ModifyTrafficMirrorSessionError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ModifyTrafficMirrorSessionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyTrafficMirrorSessionError { + fn description(&self) -> &str { + match *self {} + } +} /// Errors returned by ModifyTransitGatewayVpcAttachment #[derive(Debug, PartialEq)] pub enum ModifyTransitGatewayVpcAttachmentError {} @@ -61034,22 +65388,100 @@ impl ModifyVpcPeeringConnectionOptionsError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ModifyVpcPeeringConnectionOptionsError { +impl fmt::Display for ModifyVpcPeeringConnectionOptionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyVpcPeeringConnectionOptionsError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by ModifyVpcTenancy +#[derive(Debug, PartialEq)] +pub enum ModifyVpcTenancyError {} + +impl ModifyVpcTenancyError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ModifyVpcTenancyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ModifyVpcTenancyError { + fn description(&self) -> &str { + match *self {} + } +} +/// Errors returned by ModifyVpnConnection +#[derive(Debug, PartialEq)] +pub enum ModifyVpnConnectionError {} + +impl ModifyVpnConnectionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for ModifyVpnConnectionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ModifyVpcPeeringConnectionOptionsError { +impl Error for ModifyVpnConnectionError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ModifyVpcTenancy +/// Errors returned by ModifyVpnTunnelCertificate #[derive(Debug, PartialEq)] -pub enum ModifyVpcTenancyError {} +pub enum ModifyVpnTunnelCertificateError {} -impl ModifyVpcTenancyError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ModifyVpnTunnelCertificateError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -61072,22 +65504,22 @@ impl ModifyVpcTenancyError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ModifyVpcTenancyError { +impl fmt::Display for ModifyVpnTunnelCertificateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ModifyVpcTenancyError { +impl Error for ModifyVpnTunnelCertificateError { fn description(&self) -> &str { match *self {} } } -/// Errors returned by ModifyVpnConnection +/// Errors returned by ModifyVpnTunnelOptions #[derive(Debug, PartialEq)] -pub enum ModifyVpnConnectionError {} +pub enum ModifyVpnTunnelOptionsError {} -impl ModifyVpnConnectionError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ModifyVpnTunnelOptionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); @@ -61110,12 +65542,12 @@ impl ModifyVpnConnectionError { XmlErrorDeserializer::deserialize("Error", stack) } } -impl fmt::Display for ModifyVpnConnectionError { +impl fmt::Display for ModifyVpnTunnelOptionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ModifyVpnConnectionError { +impl Error for ModifyVpnTunnelOptionsError { fn description(&self) -> &str { match *self {} } @@ -62474,6 +66906,44 @@ impl Error for SearchTransitGatewayRoutesError { match *self {} } } +/// Errors returned by SendDiagnosticInterrupt +#[derive(Debug, PartialEq)] +pub enum SendDiagnosticInterruptError {} + +impl SendDiagnosticInterruptError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] 
{ + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("Response", stack)?; + start_element("Errors", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for SendDiagnosticInterruptError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for SendDiagnosticInterruptError { + fn description(&self) -> &str { + match *self {} + } +} /// Errors returned by StartInstances #[derive(Debug, PartialEq)] pub enum StartInstancesError {} @@ -62928,7 +67398,7 @@ pub trait Ec2 { fn assign_private_ip_addresses( &self, input: AssignPrivateIpAddressesRequest, - ) -> RusotoFuture<(), AssignPrivateIpAddressesError>; + ) -> RusotoFuture; ///

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

    fn associate_address(
@@ -62942,7 +67412,7 @@ pub trait Ec2 {
        input: AssociateClientVpnTargetNetworkRequest,
    ) -> RusotoFuture<AssociateClientVpnTargetNetworkResult, AssociateClientVpnTargetNetworkError>;

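    // Illustrative sketch (not generated code): calling `associate_address` as
    // documented above. The instance and allocation IDs are placeholders; `sync()`
    // blocks on the returned RusotoFuture.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{AssociateAddressRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = AssociateAddressRequest {
    //     allocation_id: Some("eipalloc-0123456789abcdef0".to_owned()),
    //     instance_id: Some("i-0123456789abcdef0".to_owned()),
    //     ..Default::default()
    // };
    // match client.associate_address(req).sync() {
    //     Ok(out) => println!("association: {:?}", out.association_id),
    //     Err(e) => eprintln!("associate_address failed: {}", e),
    // }

- ///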

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

fn associate_dhcp_options( &self, input: AssociateDhcpOptionsRequest, @@ -62954,7 +67424,7 @@ pub trait Ec2 { input: AssociateIamInstanceProfileRequest, ) -> RusotoFuture; - ///

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    fn associate_route_table(
        &self,
        input: AssociateRouteTableRequest,
@@ -62972,7 +67442,7 @@ pub trait Ec2 {
        input: AssociateTransitGatewayRouteTableRequest,
    ) -> RusotoFuture<
        AssociateTransitGatewayRouteTableResult,
        AssociateTransitGatewayRouteTableError,
    >;

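    // Illustrative sketch (not generated code): associating a subnet with a route
    // table per the associate_route_table doc above. Both IDs are placeholders.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{AssociateRouteTableRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = AssociateRouteTableRequest {
    //     route_table_id: "rtb-0123456789abcdef0".to_owned(),
    //     subnet_id: "subnet-0123456789abcdef0".to_owned(),
    //     ..Default::default()
    // };
    // // Keep the returned association ID; it is needed to disassociate later.
    // let association_id = client
    //     .associate_route_table(req)
    //     .sync()
    //     .map(|out| out.association_id);

- ///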

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

fn associate_vpc_cidr_block( &self, input: AssociateVpcCidrBlockRequest, @@ -62984,7 +67454,7 @@ pub trait Ec2 { input: AttachClassicLinkVpcRequest, ) -> RusotoFuture; - ///

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

+ ///

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

fn attach_internet_gateway( &self, input: AttachInternetGatewayRequest, @@ -62996,7 +67466,7 @@ pub trait Ec2 { input: AttachNetworkInterfaceRequest, ) -> RusotoFuture; - ///

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

+ ///

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Making an EBS Volume Available For Use.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

fn attach_volume( &self, input: AttachVolumeRequest, @@ -63095,7 +67565,7 @@ pub trait Ec2 { ///

Initiates the copy of an AMI from the specified source Region to the current Region. You specify the destination Region by using its endpoint when making the request.

Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

    fn copy_image(&self, input: CopyImageRequest) -> RusotoFuture<CopyImageResult, CopyImageError>;

- ///

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same Region or from one Region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

+ ///

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same Region or from one Region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a different CMK.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    fn copy_snapshot(
        &self,
        input: CopySnapshotRequest,
@@ -63119,25 +67589,25 @@ pub trait Ec2 {
        input: CreateClientVpnRouteRequest,
    ) -> RusotoFuture<CreateClientVpnRouteResult, CreateClientVpnRouteError>;

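    // Illustrative sketch (not generated code): copying a snapshot per the
    // copy_snapshot doc above, with encryption enabled on the copy. The source
    // region and snapshot ID are placeholders; the copy lands in the region the
    // client is constructed for.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CopySnapshotRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsWest2); // destination region
    // let req = CopySnapshotRequest {
    //     source_region: "us-east-1".to_owned(),
    //     source_snapshot_id: "snap-0123456789abcdef0".to_owned(),
    //     encrypted: Some(true),
    //     description: Some("encrypted copy".to_owned()),
    //     ..Default::default()
    // };
    // let new_snapshot_id = client.copy_snapshot(req).sync().map(|out| out.snapshot_id);

- ///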

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

+ ///

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    fn create_customer_gateway(
        &self,
        input: CreateCustomerGatewayRequest,
    ) -> RusotoFuture<CreateCustomerGatewayResult, CreateCustomerGatewayError>;

- ///

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

    fn create_default_subnet(
        &self,
        input: CreateDefaultSubnetRequest,
    ) -> RusotoFuture<CreateDefaultSubnetResult, CreateDefaultSubnetError>;

- ///

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see "I really want a default VPC for my existing EC2 account. Is that possible?" in the Default VPCs FAQ.

+ ///

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see "I really want a default VPC for my existing EC2 account. Is that possible?" in the Default VPCs FAQ.

    fn create_default_vpc(
        &self,
        input: CreateDefaultVpcRequest,
    ) -> RusotoFuture<CreateDefaultVpcResult, CreateDefaultVpcError>;

- ///

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. ITo have your instance to receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    fn create_dhcp_options(
        &self,
        input: CreateDhcpOptionsRequest,
@@ -63155,7 +67625,7 @@ pub trait Ec2 {
        input: CreateFleetRequest,
    ) -> RusotoFuture<CreateFleetResult, CreateFleetError>;

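    // Illustrative sketch (not generated code): building a DHCP options set with
    // the domain-name-servers and domain-name keys described in the
    // create_dhcp_options doc above. Server addresses and domain are placeholders.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateDhcpOptionsRequest, Ec2, Ec2Client, NewDhcpConfiguration};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateDhcpOptionsRequest {
    //     dhcp_configurations: vec![
    //         NewDhcpConfiguration {
    //             key: Some("domain-name-servers".to_owned()),
    //             values: Some(vec!["10.2.5.1".to_owned(), "10.2.5.2".to_owned()]),
    //         },
    //         NewDhcpConfiguration {
    //             key: Some("domain-name".to_owned()),
    //             values: Some(vec!["example.com".to_owned()]),
    //         },
    //     ],
    //     ..Default::default()
    // };
    // let dhcp_options = client.create_dhcp_options(req).sync().map(|out| out.dhcp_options);

- ///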

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

fn create_flow_logs( &self, input: CreateFlowLogsRequest, @@ -63179,7 +67649,7 @@ pub trait Ec2 { input: CreateInstanceExportTaskRequest, ) -> RusotoFuture; - ///

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

+ ///

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

fn create_internet_gateway( &self, input: CreateInternetGatewayRequest, @@ -63203,19 +67673,19 @@ pub trait Ec2 { input: CreateLaunchTemplateVersionRequest, ) -> RusotoFuture; - ///

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

    fn create_nat_gateway(
        &self,
        input: CreateNatGatewayRequest,
    ) -> RusotoFuture<CreateNatGatewayResult, CreateNatGatewayError>;

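    // Illustrative sketch (not generated code): creating a NAT gateway from an
    // Elastic IP allocation and a public subnet, per the create_nat_gateway doc
    // above. Both IDs are placeholders.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateNatGatewayRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateNatGatewayRequest {
    //     allocation_id: "eipalloc-0123456789abcdef0".to_owned(),
    //     subnet_id: "subnet-0123456789abcdef0".to_owned(),
    //     ..Default::default()
    // };
    // match client.create_nat_gateway(req).sync() {
    //     Ok(out) => println!("nat gateway: {:?}", out.nat_gateway),
    //     Err(e) => eprintln!("create_nat_gateway failed: {}", e),
    // }

- ///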

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    fn create_network_acl(
        &self,
        input: CreateNetworkAclRequest,
    ) -> RusotoFuture<CreateNetworkAclResult, CreateNetworkAclError>;

- ///

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    fn create_network_acl_entry(
        &self,
        input: CreateNetworkAclEntryRequest,
@@ -63245,13 +67715,13 @@ pub trait Ec2 {
        input: CreateReservedInstancesListingRequest,
    ) -> RusotoFuture<CreateReservedInstancesListingResult, CreateReservedInstancesListingError>;

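    // Illustrative sketch (not generated code): adding an ingress rule that allows
    // HTTPS from anywhere, using a gapped rule number as the create_network_acl_entry
    // doc above recommends. The ACL ID is a placeholder.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateNetworkAclEntryRequest, Ec2, Ec2Client, PortRange};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateNetworkAclEntryRequest {
    //     network_acl_id: "acl-0123456789abcdef0".to_owned(),
    //     rule_number: 110,              // leaves room around existing rules
    //     egress: false,                 // ingress rule
    //     protocol: "6".to_owned(),      // TCP
    //     rule_action: "allow".to_owned(),
    //     cidr_block: Some("0.0.0.0/0".to_owned()),
    //     port_range: Some(PortRange { from: Some(443), to: Some(443) }),
    //     ..Default::default()
    // };
    // // The operation returns no payload; Ok(()) means the entry was created.
    // client.create_network_acl_entry(req).sync().expect("create_network_acl_entry failed");

- ///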

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    fn create_route(
        &self,
        input: CreateRouteRequest,
    ) -> RusotoFuture<CreateRouteResult, CreateRouteError>;

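    // Illustrative sketch (not generated code): adding a default route that sends
    // internet-bound traffic to an internet gateway, per the create_route doc
    // above. Both IDs are placeholders.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateRouteRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateRouteRequest {
    //     route_table_id: "rtb-0123456789abcdef0".to_owned(),
    //     destination_cidr_block: Some("0.0.0.0/0".to_owned()),
    //     gateway_id: Some("igw-0123456789abcdef0".to_owned()),
    //     ..Default::default()
    // };
    // let created = client.create_route(req).sync();
    // println!("create_route: {:?}", created);

- ///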

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn create_route_table( &self, input: CreateRouteTableRequest, @@ -63281,7 +67751,7 @@ pub trait Ec2 { input: CreateSpotDatafeedSubscriptionRequest, ) -> RusotoFuture; - ///

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    fn create_subnet(
        &self,
        input: CreateSubnetRequest,
@@ -63290,6 +67760,30 @@ pub trait Ec2 {
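    // Illustrative sketch (not generated code): carving a /24 subnet out of a
    // VPC's CIDR block in a chosen Availability Zone, per the create_subnet doc
    // above. The VPC ID is a placeholder.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateSubnetRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateSubnetRequest {
    //     vpc_id: "vpc-0123456789abcdef0".to_owned(),
    //     cidr_block: "10.0.1.0/24".to_owned(),
    //     availability_zone: Some("us-east-1a".to_owned()),
    //     ..Default::default()
    // };
    // let subnet = client.create_subnet(req).sync().map(|out| out.subnet);

    ///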

Adds or overwrites the specified tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

    fn create_tags(&self, input: CreateTagsRequest) -> RusotoFuture<(), CreateTagsError>;

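    // Illustrative sketch (not generated code): tagging an instance and a volume
    // in one create_tags call, per the doc above. Resource IDs are placeholders;
    // the operation returns no payload on success.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateTagsRequest, Ec2, Ec2Client, Tag};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateTagsRequest {
    //     resources: vec![
    //         "i-0123456789abcdef0".to_owned(),
    //         "vol-0123456789abcdef0".to_owned(),
    //     ],
    //     tags: vec![Tag {
    //         key: Some("Stack".to_owned()),
    //         value: Some("production".to_owned()),
    //     }],
    //     ..Default::default()
    // };
    // client.create_tags(req).sync().expect("create_tags failed");

+ ///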

Creates a Traffic Mirror filter.

A Traffic Mirror filter is a set of rules that defines the traffic to mirror.

By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule to add Traffic Mirror rules to the filter. The rules you add define what traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services.

+    fn create_traffic_mirror_filter(
+        &self,
+        input: CreateTrafficMirrorFilterRequest,
+    ) -> RusotoFuture<CreateTrafficMirrorFilterResult, CreateTrafficMirrorFilterError>;
+
+ ///

Creates a Traffic Mirror rule.

A Traffic Mirror rule defines the Traffic Mirror source traffic to mirror.

You need the Traffic Mirror filter ID when you create the rule.

+    fn create_traffic_mirror_filter_rule(
+        &self,
+        input: CreateTrafficMirrorFilterRuleRequest,
+    ) -> RusotoFuture<CreateTrafficMirrorFilterRuleResult, CreateTrafficMirrorFilterRuleError>;
+
+ ///

Creates a Traffic Mirror session.

A Traffic Mirror session actively copies packets from a Traffic Mirror source to a Traffic Mirror target. Create a filter, and then assign it to the session to define a subset of the traffic to mirror, for example, all TCP traffic.

The Traffic Mirror source and the Traffic Mirror target (monitoring appliances) can be in the same VPC, or in a different VPC connected via VPC peering or a transit gateway.

By default, no traffic is mirrored. Use CreateTrafficMirrorFilter to create filter rules that specify the traffic to mirror.

+ fn create_traffic_mirror_session( + &self, + input: CreateTrafficMirrorSessionRequest, + ) -> RusotoFuture; + + ///

Creates a target for your Traffic Mirror session.

A Traffic Mirror target is the destination for mirrored traffic. The Traffic Mirror source and the Traffic Mirror target (monitoring appliances) can be in the same VPC, or in different VPCs connected via VPC peering or a transit gateway.

A Traffic Mirror target can be a network interface, or a Network Load Balancer.

To use the target in a Traffic Mirror session, use CreateTrafficMirrorSession.

+    fn create_traffic_mirror_target(
+        &self,
+        input: CreateTrafficMirrorTargetRequest,
+    ) -> RusotoFuture<CreateTrafficMirrorTargetResult, CreateTrafficMirrorTargetError>;
+
 ///

Creates a transit gateway.

You can use a transit gateway to interconnect your virtual private clouds (VPC) and on-premises networks. After the transit gateway enters the available state, you can attach your VPCs and VPN connections to the transit gateway.

To attach your VPCs, use CreateTransitGatewayVpcAttachment.

To attach a VPN connection, use CreateCustomerGateway to create a customer gateway and specify the ID of the customer gateway and the ID of the transit gateway in a call to CreateVpnConnection.

When you create a transit gateway, we create a default transit gateway route table and use it as the default association route table and the default propagation route table. You can use CreateTransitGatewayRouteTable to create additional transit gateway route tables. If you disable automatic route propagation, we do not create a default transit gateway route table. You can use EnableTransitGatewayRouteTablePropagation to propagate routes from a resource attachment to a transit gateway route table. If you disable automatic associations, you can use AssociateTransitGatewayRouteTable to associate a resource attachment with a transit gateway route table.

    fn create_transit_gateway(
        &self,
@@ -63314,13 +67808,13 @@ pub trait Ec2 {
        input: CreateTransitGatewayVpcAttachmentRequest,
    ) -> RusotoFuture<
        CreateTransitGatewayVpcAttachmentResult,
        CreateTransitGatewayVpcAttachmentError,
    >;

- ///

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

+ ///

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    fn create_volume(&self, input: CreateVolumeRequest) -> RusotoFuture<Volume, CreateVolumeError>;

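    // Illustrative sketch (not generated code): creating an encrypted 100 GiB gp2
    // volume per the create_volume doc above. Note that CreateVolume returns the
    // Volume shape directly rather than a *Result wrapper.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateVolumeRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateVolumeRequest {
    //     availability_zone: "us-east-1a".to_owned(),
    //     size: Some(100),
    //     volume_type: Some("gp2".to_owned()),
    //     encrypted: Some(true),
    //     ..Default::default()
    // };
    // let volume_id = client.create_volume(req).sync().map(|vol| vol.volume_id);

- ///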

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

+ ///

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

    fn create_vpc(&self, input: CreateVpcRequest) -> RusotoFuture<CreateVpcResult, CreateVpcError>;

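    // Illustrative sketch (not generated code): creating a /16 VPC with an
    // Amazon-provided IPv6 block and default tenancy, per the create_vpc doc above.
    //
    // use rusoto_core::Region;
    // use rusoto_ec2::{CreateVpcRequest, Ec2, Ec2Client};
    //
    // let client = Ec2Client::new(Region::UsEast1);
    // let req = CreateVpcRequest {
    //     cidr_block: "10.0.0.0/16".to_owned(),
    //     amazon_provided_ipv_6_cidr_block: Some(true),
    //     instance_tenancy: Some("default".to_owned()),
    //     ..Default::default()
    // };
    // let vpc = client.create_vpc(req).sync().map(|out| out.vpc);

- ///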

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

+ ///

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

fn create_vpc_endpoint( &self, input: CreateVpcEndpointRequest, @@ -63335,7 +67829,7 @@ pub trait Ec2 { CreateVpcEndpointConnectionNotificationError, >; - ///

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

fn create_vpc_endpoint_service_configuration( &self, input: CreateVpcEndpointServiceConfigurationRequest, @@ -63344,13 +67838,13 @@ pub trait Ec2 { CreateVpcEndpointServiceConfigurationError, >; - ///

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

+ ///

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

    fn create_vpc_peering_connection(
        &self,
        input: CreateVpcPeeringConnectionRequest,
    ) -> RusotoFuture<CreateVpcPeeringConnectionResult, CreateVpcPeeringConnectionError>;

- ///

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection types are ipsec.1 and ipsec.2.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

+ ///

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection type is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

fn create_vpn_connection( &self, input: CreateVpnConnectionRequest, @@ -63506,6 +68000,30 @@ pub trait Ec2 { ///

Deletes the specified set of tags from the specified set of resources.

To list the current tags, use DescribeTags. For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    fn delete_tags(&self, input: DeleteTagsRequest) -> RusotoFuture<(), DeleteTagsError>;

+ ///

Deletes the specified Traffic Mirror filter.

You cannot delete a Traffic Mirror filter that is in use by a Traffic Mirror session.

+    fn delete_traffic_mirror_filter(
+        &self,
+        input: DeleteTrafficMirrorFilterRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorFilterResult, DeleteTrafficMirrorFilterError>;
+
+ ///

Deletes the specified Traffic Mirror rule.

+    fn delete_traffic_mirror_filter_rule(
+        &self,
+        input: DeleteTrafficMirrorFilterRuleRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorFilterRuleResult, DeleteTrafficMirrorFilterRuleError>;
+
+ ///

Deletes the specified Traffic Mirror session.

+    fn delete_traffic_mirror_session(
+        &self,
+        input: DeleteTrafficMirrorSessionRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorSessionResult, DeleteTrafficMirrorSessionError>;
+
+ ///

Deletes the specified Traffic Mirror target.

You cannot delete a Traffic Mirror target that is in use by a Traffic Mirror session.

+    fn delete_traffic_mirror_target(
+        &self,
+        input: DeleteTrafficMirrorTargetRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorTargetResult, DeleteTrafficMirrorTargetError>;
+
 ///

Deletes the specified transit gateway.

fn delete_transit_gateway( &self, @@ -63689,7 +68207,7 @@ pub trait Ec2 { input: DescribeCustomerGatewaysRequest, ) -> RusotoFuture; - ///

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

fn describe_dhcp_options( &self, input: DescribeDhcpOptionsRequest, @@ -63710,7 +68228,13 @@ pub trait Ec2 { input: DescribeElasticGpusRequest, ) -> RusotoFuture; - ///

Describes the specified export tasks or all your export tasks.

+ ///

Describes the specified export image tasks or all your export image tasks.

+    fn describe_export_image_tasks(
+        &self,
+        input: DescribeExportImageTasksRequest,
+    ) -> RusotoFuture<DescribeExportImageTasksResult, DescribeExportImageTasksError>;
+
+ ///

Describes the specified export instance tasks or all your export instance tasks.

fn describe_export_tasks( &self, input: DescribeExportTasksRequest, @@ -63836,7 +68360,7 @@ pub trait Ec2 { input: DescribeInstanceStatusRequest, ) -> RusotoFuture<DescribeInstanceStatusResult, DescribeInstanceStatusError>; - ///

Describes the specified instances or all of your instances.

If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

+ ///

Describes the specified instances or all of your AWS account's instances.

If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

fn describe_instances( &self, input: DescribeInstancesRequest, @@ -63878,7 +68402,7 @@ pub trait Ec2 { input: DescribeNatGatewaysRequest, ) -> RusotoFuture<DescribeNatGatewaysResult, DescribeNatGatewaysError>; - ///
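A hedged example of the call just described, asking for two placeholder instance IDs and walking the reservations in the reply (struct shapes per the generated rusoto_ec2 types; leaving `instance_ids` as `None` would page through every instance the account owns):

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeInstancesRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeInstancesRequest {
        instance_ids: Some(vec![
            "i-0123456789abcdef0".to_string(), // placeholder IDs
            "i-0fedcba9876543210".to_string(),
        ]),
        ..Default::default()
    };
    match client.describe_instances(request).sync() {
        Ok(result) => {
            // Instances come back grouped by reservation.
            for reservation in result.reservations.unwrap_or_default() {
                for instance in reservation.instances.unwrap_or_default() {
                    println!(
                        "{:?} -> {:?}",
                        instance.instance_id,
                        instance.state.and_then(|s| s.name)
                    );
                }
            }
        }
        Err(e) => eprintln!("describe_instances failed: {}", e),
    }
}
```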

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

fn describe_network_acls( &self, input: DescribeNetworkAclsRequest, @@ -63929,7 +68453,7 @@ pub trait Ec2 { input: DescribePublicIpv4PoolsRequest, ) -> RusotoFuture<DescribePublicIpv4PoolsResult, DescribePublicIpv4PoolsError>; - ///

Describes the Regions that are currently available to you. The API returns a list of all the Regions, including Regions that are disabled for your account. For information about enabling Regions for your account, see Enabling and Disabling Regions in the AWS Billing and Cost Management User Guide.

For a list of the Regions supported by Amazon EC2, see Regions and Endpoints.

+ ///

Describes the Regions that are enabled for your account, or all Regions.

For a list of the Regions supported by Amazon EC2, see Regions and Endpoints.

For information about enabling and disabling Regions for your account, see Managing AWS Regions in the AWS General Reference.

fn describe_regions( &self, input: DescribeRegionsRequest, @@ -63965,7 +68489,7 @@ pub trait Ec2 { DescribeReservedInstancesOfferingsError, >; - ///
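For illustration, a minimal sketch of listing the Regions enabled for the account; the `all_regions` field is an assumption mapping the API's AllRegions parameter, so verify it against the generated request struct:

```rust
use rusoto_core::Region;
use rusoto_ec2::{DescribeRegionsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // An empty request returns only Regions enabled for the account;
    // setting `all_regions: Some(true)` (assumed field) would include
    // disabled Regions as well.
    let request = DescribeRegionsRequest::default();
    match client.describe_regions(request).sync() {
        Ok(result) => {
            for region in result.regions.unwrap_or_default() {
                println!("{:?} ({:?})", region.region_name, region.endpoint);
            }
        }
        Err(e) => eprintln!("describe_regions failed: {}", e),
    }
}
```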

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn describe_route_tables( &self, input: DescribeRouteTablesRequest, @@ -64052,7 +68576,7 @@ pub trait Ec2 { input: DescribeStaleSecurityGroupsRequest, ) -> RusotoFuture<DescribeStaleSecurityGroupsResult, DescribeStaleSecurityGroupsError>; - ///

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

fn describe_subnets( &self, input: DescribeSubnetsRequest, @@ -64064,6 +68588,24 @@ pub trait Ec2 { input: DescribeTagsRequest, ) -> RusotoFuture<DescribeTagsResult, DescribeTagsError>; + ///

Describes one or more Traffic Mirror filters.

+ fn describe_traffic_mirror_filters( + &self, + input: DescribeTrafficMirrorFiltersRequest, + ) -> RusotoFuture<DescribeTrafficMirrorFiltersResult, DescribeTrafficMirrorFiltersError>; + + ///

Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror sessions are described. Alternatively, you can filter the results.

+ fn describe_traffic_mirror_sessions( + &self, + input: DescribeTrafficMirrorSessionsRequest, + ) -> RusotoFuture<DescribeTrafficMirrorSessionsResult, DescribeTrafficMirrorSessionsError>; + + ///

Describes one or more Traffic Mirror targets.

+ fn describe_traffic_mirror_targets( + &self, + input: DescribeTrafficMirrorTargetsRequest, + ) -> RusotoFuture<DescribeTrafficMirrorTargetsResult, DescribeTrafficMirrorTargetsError>; + ///

Describes one or more attachments between resources and transit gateways. By default, all attachments are described. Alternatively, you can filter the results by attachment ID, attachment state, resource ID, or resource owner.

fn describe_transit_gateway_attachments( &self, @@ -64232,7 +68774,7 @@ pub trait Ec2 { input: DetachVpnGatewayRequest, ) -> RusotoFuture<(), DetachVpnGatewayError>; - ///

Disables default encryption for EBS volumes that are created in your account in the current region.

Call this API if you have enabled default encryption using EnableEbsEncryptionByDefault and want to disable default EBS encryption. Once default EBS encryption is disabled, you can still create an encrypted volume by setting encrypted to true in the API call that creates the volume.

Disabling default EBS encryption will not change the encryption status of any of your existing volumes.

+ ///

Disables EBS encryption by default for your account in the current Region.

After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.

Disabling encryption by default does not change the encryption status of your existing volumes.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn disable_ebs_encryption_by_default( &self, input: DisableEbsEncryptionByDefaultRequest, @@ -64286,7 +68828,7 @@ pub trait Ec2 { input: DisassociateIamInstanceProfileRequest, ) -> RusotoFuture<DisassociateIamInstanceProfileResult, DisassociateIamInstanceProfileError>; - ///

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn disassociate_route_table( &self, input: DisassociateRouteTableRequest, @@ -64313,7 +68855,7 @@ pub trait Ec2 { input: DisassociateVpcCidrBlockRequest, ) -> RusotoFuture<DisassociateVpcCidrBlockResult, DisassociateVpcCidrBlockError>; - ///

Enables default encryption for EBS volumes that are created in your account in the current region.

Once encryption is enabled with this action, EBS volumes that are created in your account will always be encrypted even if encryption is not specified at launch. This setting overrides the encrypted setting to true in all API calls that create EBS volumes in your account. A volume will be encrypted even if you specify encryption to be false in the API call that creates the volume.

If you do not specify a customer master key (CMK) in the API call that creates the EBS volume, then the volume is encrypted to your AWS account's managed CMK.

You can specify a CMK of your choice using ModifyEbsDefaultKmsKeyId.

Enabling encryption-by-default for EBS volumes has no effect on existing unencrypted volumes in your account. Encrypting the data in these requires manual action. You can either create an encrypted snapshot of an unencrypted volume, or encrypt a copy of an unencrypted snapshot. Any volume restored from an encrypted snapshot is also encrypted. For more information, see Amazon EBS Snapshots.

After EBS encryption-by-default is enabled, you can no longer launch older-generation instance types that do not support encryption. For more information, see Supported Instance Types.

+ ///

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported Instance Types.

fn enable_ebs_encryption_by_default( &self, input: EnableEbsEncryptionByDefaultRequest, @@ -64370,12 +68912,24 @@ pub trait Ec2 { ExportClientVpnClientConfigurationError, >; + ///
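A sketch of opting the account in and then reading the setting back (GetEbsEncryptionByDefault is documented further below; both request structs are assumed to carry nothing but an optional DryRun flag):

```rust
use rusoto_core::Region;
use rusoto_ec2::{
    Ec2, Ec2Client, EnableEbsEncryptionByDefaultRequest, GetEbsEncryptionByDefaultRequest,
};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Opt in to encryption by default for the current Region...
    client
        .enable_ebs_encryption_by_default(EnableEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("enable_ebs_encryption_by_default failed");
    // ...and read the setting back to confirm.
    let status = client
        .get_ebs_encryption_by_default(GetEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("get_ebs_encryption_by_default failed");
    println!(
        "encryption by default: {:?}",
        status.ebs_encryption_by_default
    );
}
```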

Exports an Amazon Machine Image (AMI) to a VM file. For more information, see Exporting a VM Directly from an Amazon Machine Image (AMI) in the VM Import/Export User Guide.

+ fn export_image( + &self, + input: ExportImageRequest, + ) -> RusotoFuture<ExportImageResult, ExportImageError>; + ///

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

fn export_transit_gateway_routes( &self, input: ExportTransitGatewayRoutesRequest, ) -> RusotoFuture<ExportTransitGatewayRoutesResult, ExportTransitGatewayRoutesError>; + ///

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each AWS account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.

+ fn get_capacity_reservation_usage( + &self, + input: GetCapacityReservationUsageRequest, + ) -> RusotoFuture<GetCapacityReservationUsageResult, GetCapacityReservationUsageError>; + ///

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance Console Output in the Amazon Elastic Compute Cloud User Guide.

fn get_console_output( &self, @@ -64388,13 +68942,13 @@ pub trait Ec2 { input: GetConsoleScreenshotRequest, ) -> RusotoFuture<GetConsoleScreenshotResult, GetConsoleScreenshotError>; - ///
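A minimal sketch of fetching and decoding console output. The `output` field comes back base64-encoded, so this assumes the `base64` crate as an extra dependency; the instance ID is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, GetConsoleOutputRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = GetConsoleOutputRequest {
        instance_id: "i-0123456789abcdef0".to_string(), // placeholder ID
        latest: Some(true), // latest serial output; Nitro instance types only
        ..Default::default()
    };
    let result = client
        .get_console_output(request)
        .sync()
        .expect("get_console_output failed");
    // Decode the base64 payload before printing it.
    if let Some(encoded) = result.output {
        let bytes = base64::decode(&encoded).expect("invalid base64");
        println!("{}", String::from_utf8_lossy(&bytes));
    }
}
```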

Describes the default customer master key (CMK) that your account uses to encrypt EBS volumes if you don’t specify a CMK in the API call. You can change this default using ModifyEbsDefaultKmsKeyId.

+ ///

Describes the default customer master key (CMK) for EBS encryption by default for your account in this Region. You can change the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn get_ebs_default_kms_key_id( &self, input: GetEbsDefaultKmsKeyIdRequest, ) -> RusotoFuture<GetEbsDefaultKmsKeyIdResult, GetEbsDefaultKmsKeyIdError>; - ///

Describes whether default EBS encryption is enabled for your account in the current region.

+ ///

Describes whether EBS encryption by default is enabled for your account in the current Region.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn get_ebs_encryption_by_default( &self, input: GetEbsEncryptionByDefaultRequest, @@ -64502,13 +69056,13 @@ pub trait Ec2 { input: ModifyClientVpnEndpointRequest, ) -> RusotoFuture<ModifyClientVpnEndpointResult, ModifyClientVpnEndpointError>; - ///

Changes the customer master key (CMK) that your account uses to encrypt EBS volumes if you don't specify a CMK in the API call.

By default, your account has an AWS-managed CMK that is used for encrypting an EBS volume when no CMK is specified in the API call that creates the volume. By calling this API, you can specify a customer-managed CMK to use in place of the AWS-managed CMK.

Note: Deleting or disabling the CMK that you have specified to act as your default CMK will result in instance-launch failures.

+ ///

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn modify_ebs_default_kms_key_id( &self, input: ModifyEbsDefaultKmsKeyIdRequest, ) -> RusotoFuture<ModifyEbsDefaultKmsKeyIdResult, ModifyEbsDefaultKmsKeyIdError>; - ///
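As a rough sketch, pointing encryption by default at a customer managed CMK (the key ARN is a placeholder; field names follow the generated request struct):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyEbsDefaultKmsKeyIdRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ModifyEbsDefaultKmsKeyIdRequest {
        // Placeholder ARN for a customer managed CMK.
        kms_key_id: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE".to_string(),
        ..Default::default()
    };
    match client.modify_ebs_default_kms_key_id(request).sync() {
        Ok(result) => println!("default CMK is now {:?}", result.kms_key_id),
        Err(e) => eprintln!("modify_ebs_default_kms_key_id failed: {}", e),
    }
}
```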

Modifies the specified EC2 Fleet.

While the EC2 Fleet is being modified, it is in the modifying state.

+ ///

Modifies the specified EC2 Fleet.

You can only modify an EC2 Fleet request of type maintain.

While the EC2 Fleet is being modified, it is in the modifying state.

To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches the additional Spot Instances according to the allocation strategy for the EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the EC2 Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 Fleet cancels any open requests that exceed the new target capacity. You can request that the EC2 Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the EC2 Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the EC2 Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your EC2 Fleet for now, but will use it again later, you can set the target capacity to 0.

fn modify_fleet( &self, input: ModifyFleetRequest, @@ -64595,13 +69149,13 @@ pub trait Ec2 { input: ModifyReservedInstancesRequest, ) -> RusotoFuture<ModifyReservedInstancesResult, ModifyReservedInstancesError>; - ///

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

+ ///

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single operation. If you need to both add and remove account IDs for a snapshot, you must use multiple operations. You can make up to 500 modifications to a snapshot in a single operation.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

fn modify_snapshot_attribute( &self, input: ModifySnapshotAttributeRequest, ) -> RusotoFuture<(), ModifySnapshotAttributeError>; - ///
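For illustration, a hedged sketch of granting one account create-volume permission on a snapshot. The nested type and field names (`CreateVolumePermissionModifications`, `create_volume_permission`, `operation_type` is omitted here) are assumptions from the generated structs, and the snapshot and account IDs are placeholders; adds and removes must go in separate calls, per the documentation above:

```rust
use rusoto_core::Region;
use rusoto_ec2::{
    CreateVolumePermission, CreateVolumePermissionModifications, Ec2, Ec2Client,
    ModifySnapshotAttributeRequest,
};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ModifySnapshotAttributeRequest {
        snapshot_id: "snap-0123456789abcdef0".to_string(), // placeholder
        attribute: Some("createVolumePermission".to_string()),
        create_volume_permission: Some(CreateVolumePermissionModifications {
            add: Some(vec![CreateVolumePermission {
                user_id: Some("111122223333".to_string()), // placeholder account
                ..Default::default()
            }]),
            ..Default::default()
        }),
        ..Default::default()
    };
    client
        .modify_snapshot_attribute(request)
        .sync()
        .expect("modify_snapshot_attribute failed");
}
```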

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot pools.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

+ ///

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the Spot Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

fn modify_spot_fleet_request( &self, input: ModifySpotFleetRequestRequest, @@ -64613,13 +69167,34 @@ pub trait Ec2 { input: ModifySubnetAttributeRequest, ) -> RusotoFuture<(), ModifySubnetAttributeError>; + ///

Allows or restricts mirroring network services.

By default, Amazon DNS network services are not eligible for Traffic Mirror. Use AddNetworkServices to add network services to a Traffic Mirror filter. When a network service is added to the Traffic Mirror filter, all traffic related to that network service will be mirrored. When you no longer want to mirror network services, use RemoveNetworkServices to remove the network services from the Traffic Mirror filter.

For information about filter rule properties, see Network Services in the Traffic Mirroring User Guide.

+ fn modify_traffic_mirror_filter_network_services( + &self, + input: ModifyTrafficMirrorFilterNetworkServicesRequest, + ) -> RusotoFuture< + ModifyTrafficMirrorFilterNetworkServicesResult, + ModifyTrafficMirrorFilterNetworkServicesError, + >; + + ///
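A minimal sketch of opting a filter in to mirroring Amazon DNS traffic; "amazon-dns" is the only network service value the API documents at this point, and the filter ID is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyTrafficMirrorFilterNetworkServicesRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ModifyTrafficMirrorFilterNetworkServicesRequest {
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_string(), // placeholder
        // Start mirroring all traffic related to the Amazon DNS service.
        add_network_services: Some(vec!["amazon-dns".to_string()]),
        ..Default::default()
    };
    client
        .modify_traffic_mirror_filter_network_services(request)
        .sync()
        .expect("modify_traffic_mirror_filter_network_services failed");
}
```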

Modifies the specified Traffic Mirror rule.

DestinationCidrBlock and SourceCidrBlock must both be an IPv4 range or an IPv6 range.

+ fn modify_traffic_mirror_filter_rule( + &self, + input: ModifyTrafficMirrorFilterRuleRequest, + ) -> RusotoFuture<ModifyTrafficMirrorFilterRuleResult, ModifyTrafficMirrorFilterRuleError>; + + ///

Modifies a Traffic Mirror session.

+ fn modify_traffic_mirror_session( + &self, + input: ModifyTrafficMirrorSessionRequest, + ) -> RusotoFuture<ModifyTrafficMirrorSessionResult, ModifyTrafficMirrorSessionError>; + ///

Modifies the specified VPC attachment.

fn modify_transit_gateway_vpc_attachment( &self, input: ModifyTransitGatewayVpcAttachmentRequest, ) -> RusotoFuture<ModifyTransitGatewayVpcAttachmentResult, ModifyTransitGatewayVpcAttachmentError>; - ///

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

+ ///

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

fn modify_volume( &self, input: ModifyVolumeRequest, @@ -64637,7 +69212,7 @@ pub trait Ec2 { input: ModifyVpcAttributeRequest, ) -> RusotoFuture<(), ModifyVpcAttributeError>; - ///

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

+ ///

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

fn modify_vpc_endpoint( &self, input: ModifyVpcEndpointRequest, @@ -64661,7 +69236,7 @@ pub trait Ec2 { ModifyVpcEndpointServiceConfigurationError, >; - ///

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

+ ///

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

fn modify_vpc_endpoint_service_permissions( &self, input: ModifyVpcEndpointServicePermissionsRequest, @@ -64682,12 +69257,24 @@ pub trait Ec2 { input: ModifyVpcTenancyRequest, ) -> RusotoFuture<ModifyVpcTenancyResult, ModifyVpcTenancyError>; - ///

Modifies the target gateway of a AWS Site-to-Site VPN connection. The following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints.

+ ///

Modifies the target gateway of an AWS Site-to-Site VPN connection. The following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints.

fn modify_vpn_connection( &self, input: ModifyVpnConnectionRequest, ) -> RusotoFuture<ModifyVpnConnectionResult, ModifyVpnConnectionError>; + ///
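A hedged sketch of the second migration path listed above, moving an existing connection from a virtual private gateway to an already-created transit gateway (both IDs are placeholders; remember the route-table follow-up work the documentation describes):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyVpnConnectionRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ModifyVpnConnectionRequest {
        vpn_connection_id: "vpn-0123456789abcdef0".to_string(), // placeholder
        // Target gateway for the migration; create it first with
        // CreateTransitGateway, as noted above.
        transit_gateway_id: Some("tgw-0123456789abcdef0".to_string()), // placeholder
        ..Default::default()
    };
    match client.modify_vpn_connection(request).sync() {
        Ok(result) => println!("connection: {:?}", result.vpn_connection),
        Err(e) => eprintln!("modify_vpn_connection failed: {}", e),
    }
}
```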

Modifies the VPN tunnel endpoint certificate.

+ fn modify_vpn_tunnel_certificate( + &self, + input: ModifyVpnTunnelCertificateRequest, + ) -> RusotoFuture<ModifyVpnTunnelCertificateResult, ModifyVpnTunnelCertificateError>; + + ///

Modifies the options for a VPN tunnel in an AWS Site-to-Site VPN connection. You can modify multiple options for a tunnel in a single request, but you can only modify one tunnel at a time. For more information, see Site-to-Site VPN Tunnel Options for Your Site-to-Site VPN Connection in the AWS Site-to-Site VPN User Guide.

+ fn modify_vpn_tunnel_options( + &self, + input: ModifyVpnTunnelOptionsRequest, + ) -> RusotoFuture<ModifyVpnTunnelOptionsResult, ModifyVpnTunnelOptionsError>; + ///

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see UnmonitorInstances.

fn monitor_instances( &self, @@ -64775,22 +69362,22 @@ pub trait Ec2 { ReplaceIamInstanceProfileAssociationError, >; - ///
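As a rough sketch, enabling detailed monitoring for one placeholder instance and printing the per-instance monitoring state from the reply:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, MonitorInstancesRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = MonitorInstancesRequest {
        instance_ids: vec!["i-0123456789abcdef0".to_string()], // placeholder
        ..Default::default()
    };
    match client.monitor_instances(request).sync() {
        Ok(result) => {
            for m in result.instance_monitorings.unwrap_or_default() {
                println!("{:?}: {:?}", m.instance_id, m.monitoring);
            }
        }
        Err(e) => eprintln!("monitor_instances failed: {}", e),
    }
}
```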

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

+ ///

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

fn replace_network_acl_association( &self, input: ReplaceNetworkAclAssociationRequest, ) -> RusotoFuture<ReplaceNetworkAclAssociationResult, ReplaceNetworkAclAssociationError>; - ///

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

fn replace_network_acl_entry( &self, input: ReplaceNetworkAclEntryRequest, ) -> RusotoFuture<(), ReplaceNetworkAclEntryError>; - ///

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn replace_route(&self, input: ReplaceRouteRequest) -> RusotoFuture<(), ReplaceRouteError>; - ///

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

+ ///

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

fn replace_route_table_association( &self, input: ReplaceRouteTableAssociationRequest, @@ -64808,7 +69395,7 @@ pub trait Ec2 { input: ReportInstanceStatusRequest, ) -> RusotoFuture<(), ReportInstanceStatusError>; - ///
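For illustration, a minimal sketch of swapping the route table behind an existing association (both IDs are placeholders; note that the call returns a new association ID that supersedes the one passed in):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ReplaceRouteTableAssociationRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ReplaceRouteTableAssociationRequest {
        association_id: "rtbassoc-0123456789abcdef0".to_string(), // placeholder
        route_table_id: "rtb-0123456789abcdef0".to_string(),      // placeholder
        ..Default::default()
    };
    match client.replace_route_table_association(request).sync() {
        // Keep the new association ID for any later replacements.
        Ok(result) => println!("new association: {:?}", result.new_association_id),
        Err(e) => eprintln!("replace_route_table_association failed: {}", e),
    }
}
```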

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

+ ///

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot Instance pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

fn request_spot_fleet( &self, input: RequestSpotFleetRequest, @@ -64820,7 +69407,7 @@ pub trait Ec2 { input: RequestSpotInstancesRequest, ) -> RusotoFuture<RequestSpotInstancesResult, RequestSpotInstancesError>; - ///

Resets the account's default customer master key (CMK) to the account's AWS-managed default CMK. This default CMK is used to encrypt EBS volumes when you have enabled EBS encryption by default without specifying a CMK in the API call. If you have not enabled encryption by default, then this CMK is used when you set the Encrypted parameter to true without specifying a custom CMK in the API call.

Call this API if you have modified the default CMK that is used for encrypting your EBS volume using ModifyEbsDefaultKmsKeyId and you want to reset it to the AWS-managed default CMK. After resetting, you can continue to provide a CMK of your choice in the API call that creates the volume. However, if no CMK is specified, your account will encrypt the volume to the AWS-managed default CMK.

+ ///

Resets the default customer master key (CMK) for EBS encryption for your account in this Region to the AWS managed CMK for EBS.

After resetting the default CMK to the AWS managed CMK, you can continue to encrypt by a customer managed CMK by specifying it when you create the volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn reset_ebs_default_kms_key_id( &self, input: ResetEbsDefaultKmsKeyIdRequest, @@ -64898,6 +69485,12 @@ pub trait Ec2 { input: SearchTransitGatewayRoutesRequest, ) -> RusotoFuture<SearchTransitGatewayRoutesResult, SearchTransitGatewayRoutesError>; + ///

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic Interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

+ fn send_diagnostic_interrupt( + &self, + input: SendDiagnosticInterruptRequest, + ) -> RusotoFuture<(), SendDiagnosticInterruptError>; + ///
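A minimal sketch of sending the interrupt (the instance ID is a placeholder; the operation returns no payload, matching the `RusotoFuture<(), SendDiagnosticInterruptError>` signature above):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, SendDiagnosticInterruptRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Triggers a kernel panic (Linux) or stop error (Windows). Configure
    // the guest OS to handle the NMI before calling this, as noted above.
    let request = SendDiagnosticInterruptRequest {
        instance_id: "i-0123456789abcdef0".to_string(), // placeholder
        ..Default::default()
    };
    client
        .send_diagnostic_interrupt(request)
        .sync()
        .expect("send_diagnostic_interrupt failed");
}
```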

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

fn start_instances( &self, @@ -64976,10 +69569,7 @@ impl Ec2Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Ec2Client { - Ec2Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -64993,10 +69583,14 @@ impl Ec2Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Ec2Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Ec2Client { + Ec2Client { client, region } } } @@ -65035,7 +69629,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65083,7 +69677,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65128,7 +69722,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65173,7 +69767,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65221,7 +69815,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65269,7 +69863,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65317,7 +69911,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65367,7 +69961,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65415,7 +70009,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
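The constructor refactor above routes both `new` and `new_with` through the newly added `new_with_client`. A sketch of the three construction paths, assuming rusoto_core's `HttpClient` and the re-exported credential provider types:

```rust
use rusoto_core::credential::EnvironmentProvider;
use rusoto_core::{Client, HttpClient, Region};
use rusoto_ec2::Ec2Client;

fn main() {
    // 1. Default credentials provider and TLS client.
    let _simple = Ec2Client::new(Region::UsEast1);

    // 2. Explicit dispatcher and credentials provider.
    let _custom = Ec2Client::new_with(
        HttpClient::new().expect("failed to create request dispatcher"),
        EnvironmentProvider::default(),
        Region::UsEast1,
    );

    // 3. The constructor this diff adds: reuse one shared `Client`
    //    across several service clients.
    let shared = Client::shared();
    let _from_client = Ec2Client::new_with_client(shared, Region::UsEast1);
}
```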
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65435,7 +70029,7 @@ impl Ec2 for Ec2Client { fn assign_private_ip_addresses( &self, input: AssignPrivateIpAddressesRequest, - ) -> RusotoFuture<(), AssignPrivateIpAddressesError> { + ) -> RusotoFuture<AssignPrivateIpAddressesResult, AssignPrivateIpAddressesError> { let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); let mut params = Params::new(); @@ -65452,7 +70046,27 @@ impl Ec2 for Ec2Client { })); } - Box::new(future::ok(::std::mem::drop(response))) + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = AssignPrivateIpAddressesResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = AssignPrivateIpAddressesResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) }) } @@ -65488,7 +70102,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65536,7 +70150,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65552,7 +70166,7 @@ impl Ec2 for Ec2Client { }) } - ///

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

fn associate_dhcp_options( &self, input: AssociateDhcpOptionsRequest, @@ -65608,7 +70222,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65624,7 +70238,7 @@ impl Ec2 for Ec2Client { }) } - ///

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn associate_route_table( &self, input: AssociateRouteTableRequest, @@ -65655,7 +70269,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65700,7 +70314,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65748,7 +70362,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65764,7 +70378,7 @@ impl Ec2 for Ec2Client { }) } - ///
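As a rough sketch of the association described above (placeholder IDs; the returned association ID is worth keeping, since DisassociateRouteTable and ReplaceRouteTableAssociation both need it later):

```rust
use rusoto_core::Region;
use rusoto_ec2::{AssociateRouteTableRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = AssociateRouteTableRequest {
        route_table_id: "rtb-0123456789abcdef0".to_string(), // placeholder
        subnet_id: "subnet-0123456789abcdef0".to_string(),   // placeholder
        ..Default::default()
    };
    match client.associate_route_table(request).sync() {
        Ok(result) => println!("association: {:?}", result.association_id),
        Err(e) => eprintln!("associate_route_table failed: {}", e),
    }
}
```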

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

+ ///

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

fn associate_vpc_cidr_block( &self, input: AssociateVpcCidrBlockRequest, @@ -65793,7 +70407,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65840,7 +70454,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65856,7 +70470,7 @@ impl Ec2 for Ec2Client { }) } - ///

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

+ ///

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

fn attach_internet_gateway( &self, input: AttachInternetGatewayRequest, @@ -65910,7 +70524,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -65926,7 +70540,7 @@ impl Ec2 for Ec2Client { }) } - ///

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

+ ///

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Making an EBS Volume Available For Use.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

fn attach_volume( &self, input: AttachVolumeRequest, @@ -65958,7 +70572,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66004,7 +70618,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66049,7 +70663,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66147,7 +70761,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66195,7 +70809,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66240,7 +70854,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66343,7 +70957,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66389,7 +71003,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66434,7 +71048,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66479,7 +71093,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66524,7 +71138,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let 
_start_document = stack.next(); @@ -66572,7 +71186,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66615,7 +71229,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66629,7 +71243,7 @@ impl Ec2 for Ec2Client { }) } - ///

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same Region or from one Region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

+ ///

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same Region or from one Region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a different CMK.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

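A minimal usage sketch for the copy operation described above, preceding the generated implementation (not from this diff; the IDs are placeholders, and `encrypted` corresponds to the Encrypted parameter mentioned in the doc):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CopySnapshotRequest, Ec2, Ec2Client};

fn main() {
    // Sign the request against the destination Region of the copy.
    let client = Ec2Client::new(Region::UsEast1);
    let request = CopySnapshotRequest {
        source_region: "us-west-2".to_owned(),
        source_snapshot_id: "snap-0123456789abcdef0".to_owned(), // placeholder
        description: Some("cross-region copy".to_owned()),
        encrypted: Some(true), // produce an encrypted copy of an unencrypted source
        ..Default::default()
    };
    match client.copy_snapshot(request).sync() {
        Ok(result) => println!("new snapshot: {:?}", result.snapshot_id),
        Err(e) => eprintln!("CopySnapshot failed: {}", e),
    }
}
```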
fn copy_snapshot( &self, input: CopySnapshotRequest, @@ -66661,7 +71275,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66704,7 +71318,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66749,7 +71363,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66796,7 +71410,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66812,7 +71426,7 @@ impl Ec2 for Ec2Client { }) } - ///

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

+ ///

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

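A hedged sketch of calling this operation (not part of the diff; the address and ASN are placeholders, and the field optionality assumes the required/optional split of the 2016-11-15 API shape):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateCustomerGatewayRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateCustomerGatewayRequest {
        bgp_asn: 65000,                       // a private ASN in the 64512-65534 range
        public_ip: "203.0.113.12".to_owned(), // static, Internet-routable address
        type_: "ipsec.1".to_owned(),          // `type` is a Rust keyword, hence `type_`
        ..Default::default()
    };
    let result = client
        .create_customer_gateway(request)
        .sync()
        .expect("CreateCustomerGateway failed");
    println!("customer gateway: {:?}", result.customer_gateway);
}
```

Because identical requests are idempotent, retrying this call returns the existing gateway rather than creating a second one.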
fn create_customer_gateway( &self, input: CreateCustomerGatewayRequest, @@ -66841,7 +71455,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66857,7 +71471,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

fn create_default_subnet( &self, input: CreateDefaultSubnetRequest, @@ -66888,7 +71502,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66904,7 +71518,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see "I really want a default VPC for my existing EC2 account. Is that possible?" in the Default VPCs FAQ.

+ ///

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see "I really want a default VPC for my existing EC2 account. Is that possible?" in the Default VPCs FAQ.

fn create_default_vpc( &self, input: CreateDefaultVpcRequest, @@ -66936,7 +71550,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -66952,7 +71566,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. ITo have your instance to receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

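To make the option-key list above concrete, a hedged sketch (not from this diff) that sets two of the documented keys; `NewDhcpConfiguration` is assumed to be the generated key/values pair type:

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateDhcpOptionsRequest, Ec2, Ec2Client, NewDhcpConfiguration};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateDhcpOptionsRequest {
        dhcp_configurations: vec![
            NewDhcpConfiguration {
                key: Some("domain-name-servers".to_owned()),
                values: Some(vec!["10.0.0.2".to_owned(), "10.0.0.3".to_owned()]),
            },
            NewDhcpConfiguration {
                key: Some("domain-name".to_owned()),
                values: Some(vec!["example.internal".to_owned()]), // a single domain name
            },
        ],
        ..Default::default()
    };
    let result = client
        .create_dhcp_options(request)
        .sync()
        .expect("CreateDhcpOptions failed");
    println!("dhcp options: {:?}", result.dhcp_options);
}
```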
fn create_dhcp_options( &self, input: CreateDhcpOptionsRequest, @@ -66984,7 +71598,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67032,7 +71646,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67080,7 +71694,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67094,7 +71708,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

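A hedged sketch publishing rejected-traffic records for one VPC to S3, per the destinations described above (not part of the diff; the bucket ARN and VPC ID are placeholders):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateFlowLogsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateFlowLogsRequest {
        resource_ids: vec!["vpc-0123456789abcdef0".to_owned()],
        resource_type: "VPC".to_owned(),
        traffic_type: "REJECT".to_owned(), // also: ACCEPT, ALL
        log_destination_type: Some("s3".to_owned()),
        log_destination: Some("arn:aws:s3:::my-flow-log-bucket".to_owned()),
        ..Default::default()
    };
    match client.create_flow_logs(request).sync() {
        Ok(result) => println!("flow log ids: {:?}", result.flow_log_ids),
        Err(e) => eprintln!("CreateFlowLogs failed: {}", e),
    }
}
```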
fn create_flow_logs( &self, input: CreateFlowLogsRequest, @@ -67126,7 +71740,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67174,7 +71788,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67222,7 +71836,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67265,7 +71879,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67281,7 +71895,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

+ ///

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

fn create_internet_gateway( &self, input: CreateInternetGatewayRequest, @@ -67310,7 +71924,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67358,7 +71972,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67402,7 +72016,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67447,7 +72061,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67463,7 +72077,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

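A hedged sketch, assuming an already-allocated Elastic IP (both IDs are placeholders, not from this diff):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateNatGatewayRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateNatGatewayRequest {
        allocation_id: "eipalloc-0123456789abcdef0".to_owned(), // Elastic IP for the gateway
        subnet_id: "subnet-0123456789abcdef0".to_owned(),       // a public subnet
        ..Default::default()
    };
    let result = client
        .create_nat_gateway(request)
        .sync()
        .expect("CreateNatGateway failed");
    println!("nat gateway: {:?}", result.nat_gateway.and_then(|g| g.nat_gateway_id));
}
```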
fn create_nat_gateway( &self, input: CreateNatGatewayRequest, @@ -67495,7 +72109,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67511,7 +72125,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

fn create_network_acl( &self, input: CreateNetworkAclRequest, @@ -67543,7 +72157,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67559,7 +72173,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

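A hedged sketch adding an ingress rule; rule number 110 leaves room on either side, per the numbering advice above (IDs are placeholders, not from this diff):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateNetworkAclEntryRequest, Ec2, Ec2Client, PortRange};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateNetworkAclEntryRequest {
        network_acl_id: "acl-0123456789abcdef0".to_owned(),
        rule_number: 110,
        egress: false,                    // false selects the ingress rule set
        protocol: "6".to_owned(),         // TCP
        rule_action: "allow".to_owned(),
        cidr_block: Some("0.0.0.0/0".to_owned()),
        port_range: Some(PortRange { from: Some(443), to: Some(443) }),
        ..Default::default()
    };
    // The operation has no response payload, so success yields ().
    client
        .create_network_acl_entry(request)
        .sync()
        .expect("CreateNetworkAclEntry failed");
}
```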
fn create_network_acl_entry( &self, input: CreateNetworkAclEntryRequest, @@ -67613,7 +72227,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67661,7 +72275,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67734,7 +72348,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67750,7 +72364,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

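A hedged sketch installing the classic default route through an internet gateway, one of the targets listed above (placeholder IDs, not from this diff):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateRouteRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateRouteRequest {
        route_table_id: "rtb-0123456789abcdef0".to_owned(),
        destination_cidr_block: Some("0.0.0.0/0".to_owned()),
        gateway_id: Some("igw-0123456789abcdef0".to_owned()),
        ..Default::default()
    };
    let result = client.create_route(request).sync().expect("CreateRoute failed");
    println!("succeeded: {:?}", result.return_); // `return` is a Rust keyword, hence `return_`
}
```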
fn create_route( &self, input: CreateRouteRequest, @@ -67782,7 +72396,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67796,7 +72410,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

fn create_route_table( &self, input: CreateRouteTableRequest, @@ -67828,7 +72442,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67875,7 +72489,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67923,7 +72537,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -67968,7 +72582,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68014,7 +72628,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68030,7 +72644,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

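A hedged sketch carving a /24 out of a /16 VPC, consistent with the size limits above (placeholder IDs, not from this diff):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateSubnetRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateSubnetRequest {
        vpc_id: "vpc-0123456789abcdef0".to_owned(),
        cidr_block: "10.0.1.0/24".to_owned(), // must not overlap any existing subnet
        availability_zone: Some("us-east-1a".to_owned()),
        ..Default::default()
    };
    let result = client.create_subnet(request).sync().expect("CreateSubnet failed");
    println!("subnet: {:?}", result.subnet.and_then(|s| s.subnet_id));
}
```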
fn create_subnet( &self, input: CreateSubnetRequest, @@ -68062,7 +72676,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68101,6 +72715,186 @@ impl Ec2 for Ec2Client { }) } + ///

Creates a Traffic Mirror filter.

A Traffic Mirror filter is a set of rules that defines the traffic to mirror.

By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule to add Traffic Mirror rules to the filter. The rules you add define what traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services.

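Since this is one of the operations the diff introduces, a hedged caller-side starting point (not part of the generated code; every request field is optional, so an empty request is valid but mirrors nothing until rules are added):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateTrafficMirrorFilterRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateTrafficMirrorFilterRequest {
        description: Some("mirror inbound web traffic".to_owned()),
        ..Default::default()
    };
    let result = client
        .create_traffic_mirror_filter(request)
        .sync()
        .expect("CreateTrafficMirrorFilter failed");
    // The returned filter ID feeds CreateTrafficMirrorFilterRule below.
    println!(
        "filter: {:?}",
        result.traffic_mirror_filter.and_then(|f| f.traffic_mirror_filter_id)
    );
}
```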
+ fn create_traffic_mirror_filter( + &self, + input: CreateTrafficMirrorFilterRequest, + ) -> RusotoFuture<CreateTrafficMirrorFilterResult, CreateTrafficMirrorFilterError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "CreateTrafficMirrorFilter"); + params.put("Version", "2016-11-15"); + CreateTrafficMirrorFilterRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(CreateTrafficMirrorFilterError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = CreateTrafficMirrorFilterResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = CreateTrafficMirrorFilterResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Creates a Traffic Mirror rule.

A Traffic Mirror rule defines the Traffic Mirror source traffic to mirror.

You need the Traffic Mirror filter ID when you create the rule.

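A hedged sketch that accepts all inbound TCP into the mirror, using a filter ID from the previous call (placeholder ID, not part of the generated code):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateTrafficMirrorFilterRuleRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateTrafficMirrorFilterRuleRequest {
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_owned(), // placeholder
        traffic_direction: "ingress".to_owned(),
        rule_number: 100,
        rule_action: "accept".to_owned(),
        protocol: Some(6), // TCP
        source_cidr_block: "0.0.0.0/0".to_owned(),
        destination_cidr_block: "0.0.0.0/0".to_owned(),
        ..Default::default()
    };
    let result = client
        .create_traffic_mirror_filter_rule(request)
        .sync()
        .expect("CreateTrafficMirrorFilterRule failed");
    println!("rule: {:?}", result.traffic_mirror_filter_rule);
}
```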
+ fn create_traffic_mirror_filter_rule( + &self, + input: CreateTrafficMirrorFilterRuleRequest, + ) -> RusotoFuture<CreateTrafficMirrorFilterRuleResult, CreateTrafficMirrorFilterRuleError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "CreateTrafficMirrorFilterRule"); + params.put("Version", "2016-11-15"); + CreateTrafficMirrorFilterRuleRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(CreateTrafficMirrorFilterRuleError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = CreateTrafficMirrorFilterRuleResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = CreateTrafficMirrorFilterRuleResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Creates a Traffic Mirror session.

A Traffic Mirror session actively copies packets from a Traffic Mirror source to a Traffic Mirror target. Create a filter, and then assign it to the session to define a subset of the traffic to mirror, for example all TCP traffic.

The Traffic Mirror source and the Traffic Mirror target (monitoring appliances) can be in the same VPC, or in a different VPC connected via VPC peering or a transit gateway.

By default, no traffic is mirrored. Use CreateTrafficMirrorFilter to create filter rules that specify the traffic to mirror.

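A hedged sketch tying a source ENI, a target, and a filter into one session (placeholder IDs, not part of the generated code):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateTrafficMirrorSessionRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateTrafficMirrorSessionRequest {
        network_interface_id: "eni-0123456789abcdef0".to_owned(), // the mirror source
        traffic_mirror_target_id: "tmt-0123456789abcdef0".to_owned(),
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_owned(),
        session_number: 1, // priority when a source has several sessions
        ..Default::default()
    };
    let result = client
        .create_traffic_mirror_session(request)
        .sync()
        .expect("CreateTrafficMirrorSession failed");
    println!("session: {:?}", result.traffic_mirror_session);
}
```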
+ fn create_traffic_mirror_session( + &self, + input: CreateTrafficMirrorSessionRequest, + ) -> RusotoFuture<CreateTrafficMirrorSessionResult, CreateTrafficMirrorSessionError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "CreateTrafficMirrorSession"); + params.put("Version", "2016-11-15"); + CreateTrafficMirrorSessionRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(CreateTrafficMirrorSessionError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = CreateTrafficMirrorSessionResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = CreateTrafficMirrorSessionResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Creates a target for your Traffic Mirror session.

A Traffic Mirror target is the destination for mirrored traffic. The Traffic Mirror source and the Traffic Mirror target (monitoring appliances) can be in the same VPC, or in different VPCs connected via VPC peering or a transit gateway.

A Traffic Mirror target can be a network interface, or a Network Load Balancer.

To use the target in a Traffic Mirror session, use CreateTrafficMirrorSession.

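A hedged sketch pointing a target at a monitoring appliance's network interface (placeholder ID, not part of the generated code; a Network Load Balancer ARN would go in `network_load_balancer_arn` instead):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateTrafficMirrorTargetRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateTrafficMirrorTargetRequest {
        network_interface_id: Some("eni-0123456789abcdef0".to_owned()),
        description: Some("monitoring appliance ENI".to_owned()),
        ..Default::default()
    };
    let result = client
        .create_traffic_mirror_target(request)
        .sync()
        .expect("CreateTrafficMirrorTarget failed");
    println!("target: {:?}", result.traffic_mirror_target);
}
```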
+ fn create_traffic_mirror_target( + &self, + input: CreateTrafficMirrorTargetRequest, + ) -> RusotoFuture<CreateTrafficMirrorTargetResult, CreateTrafficMirrorTargetError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "CreateTrafficMirrorTarget"); + params.put("Version", "2016-11-15"); + CreateTrafficMirrorTargetRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(CreateTrafficMirrorTargetError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = CreateTrafficMirrorTargetResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = CreateTrafficMirrorTargetResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Creates a transit gateway.

You can use a transit gateway to interconnect your virtual private clouds (VPC) and on-premises networks. After the transit gateway enters the available state, you can attach your VPCs and VPN connections to the transit gateway.

To attach your VPCs, use CreateTransitGatewayVpcAttachment.

To attach a VPN connection, use CreateCustomerGateway to create a customer gateway and specify the ID of the customer gateway and the ID of the transit gateway in a call to CreateVpnConnection.

When you create a transit gateway, we create a default transit gateway route table and use it as the default association route table and the default propagation route table. You can use CreateTransitGatewayRouteTable to create additional transit gateway route tables. If you disable automatic route propagation, we do not create a default transit gateway route table. You can use EnableTransitGatewayRouteTablePropagation to propagate routes from a resource attachment to a transit gateway route table. If you disable automatic associations, you can use AssociateTransitGatewayRouteTable to associate a resource attachment with a transit gateway route table.

fn create_transit_gateway( &self, @@ -68132,7 +72926,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68177,7 +72971,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68223,7 +73017,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68271,7 +73065,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68287,7 +73081,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

+ ///

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

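A hedged caller sketch (not from this diff); note that, unlike most operations here, CreateVolume deserializes into the Volume structure itself rather than a *Result wrapper:

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateVolumeRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateVolumeRequest {
        availability_zone: "us-east-1a".to_owned(), // must match the target instance's AZ
        size: Some(100),                            // GiB; omit when restoring a snapshot
        volume_type: Some("gp2".to_owned()),
        encrypted: Some(true),
        ..Default::default()
    };
    let volume = client.create_volume(request).sync().expect("CreateVolume failed");
    println!("volume: {:?}", volume.volume_id);
}
```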
fn create_volume(&self, input: CreateVolumeRequest) -> RusotoFuture { let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); let mut params = Params::new(); @@ -68316,7 +73110,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68329,7 +73123,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

+ ///

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

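A hedged sketch requesting the largest allowed IPv4 block, per the /28-to-/16 limits above (not from this diff):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateVpcRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateVpcRequest {
        cidr_block: "10.0.0.0/16".to_owned(),
        instance_tenancy: Some("default".to_owned()), // cannot be changed after creation
        ..Default::default()
    };
    let result = client.create_vpc(request).sync().expect("CreateVpc failed");
    println!("vpc: {:?}", result.vpc.and_then(|v| v.vpc_id));
}
```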
fn create_vpc(&self, input: CreateVpcRequest) -> RusotoFuture { let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); let mut params = Params::new(); @@ -68358,7 +73152,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68372,7 +73166,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

+ ///

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

fn create_vpc_endpoint( &self, input: CreateVpcEndpointRequest, @@ -68404,7 +73198,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68458,7 +73252,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68475,7 +73269,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

+ ///

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

fn create_vpc_endpoint_service_configuration( &self, input: CreateVpcEndpointServiceConfigurationRequest, @@ -68509,7 +73303,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68525,7 +73319,7 @@ impl Ec2 for Ec2Client { }) } - ///

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

+ ///

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

fn create_vpc_peering_connection( &self, input: CreateVpcPeeringConnectionRequest, @@ -68554,7 +73348,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68570,7 +73364,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection types are ipsec.1 and ipsec.2.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

+ ///

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection type is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

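A hedged sketch (placeholder IDs, not from this diff; field optionality assumes the 2016-11-15 shape, where the customer gateway ID and connection type are required):

```rust
use rusoto_core::Region;
use rusoto_ec2::{CreateVpnConnectionRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = CreateVpnConnectionRequest {
        customer_gateway_id: "cgw-0123456789abcdef0".to_owned(),
        vpn_gateway_id: Some("vgw-0123456789abcdef0".to_owned()),
        type_: "ipsec.1".to_owned(), // the only supported connection type
        ..Default::default()
    };
    let result = client
        .create_vpn_connection(request)
        .sync()
        .expect("CreateVpnConnection failed");
    // Contains sensitive configuration text for the network administrator;
    // handle it accordingly, per the HTTPS warning above.
    if let Some(conn) = result.vpn_connection {
        println!(
            "configuration present: {}",
            conn.customer_gateway_configuration.is_some()
        );
    }
}
```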
fn create_vpn_connection( &self, input: CreateVpnConnectionRequest, @@ -68601,7 +73395,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68674,7 +73468,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68719,7 +73513,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68766,7 +73560,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68867,7 +73661,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68915,7 +73709,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -68961,7 +73755,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69009,7 +73803,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69106,7 +73900,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69151,7 +73945,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69199,7 +73993,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); 
let _start_document = stack.next(); @@ -69325,7 +74119,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69551,6 +74345,186 @@ impl Ec2 for Ec2Client { }) } + ///

Deletes the specified Traffic Mirror filter.

You cannot delete a Traffic Mirror filter that is in use by a Traffic Mirror session.

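For reference, a minimal sketch of invoking the newly added operation through the generated client on rusoto's futures-0.1 surface; the region and the tmf-… filter ID are illustrative placeholders, and .sync() simply blocks on the returned RusotoFuture:

use rusoto_core::Region;
use rusoto_ec2::{DeleteTrafficMirrorFilterRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DeleteTrafficMirrorFilterRequest {
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_owned(),
        ..Default::default()
    };
    // Blocks on the RusotoFuture; no session may still reference the filter.
    match client.delete_traffic_mirror_filter(request).sync() {
        Ok(output) => println!("deleted: {:?}", output.traffic_mirror_filter_id),
        Err(err) => eprintln!("DeleteTrafficMirrorFilter failed: {}", err),
    }
}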
+    fn delete_traffic_mirror_filter(
+        &self,
+        input: DeleteTrafficMirrorFilterRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorFilterResult, DeleteTrafficMirrorFilterError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DeleteTrafficMirrorFilter");
+        params.put("Version", "2016-11-15");
+        DeleteTrafficMirrorFilterRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DeleteTrafficMirrorFilterError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DeleteTrafficMirrorFilterResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DeleteTrafficMirrorFilterResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Deletes the specified Traffic Mirror rule.

+    fn delete_traffic_mirror_filter_rule(
+        &self,
+        input: DeleteTrafficMirrorFilterRuleRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorFilterRuleResult, DeleteTrafficMirrorFilterRuleError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DeleteTrafficMirrorFilterRule");
+        params.put("Version", "2016-11-15");
+        DeleteTrafficMirrorFilterRuleRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DeleteTrafficMirrorFilterRuleError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DeleteTrafficMirrorFilterRuleResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DeleteTrafficMirrorFilterRuleResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Deletes the specified Traffic Mirror session.

+    fn delete_traffic_mirror_session(
+        &self,
+        input: DeleteTrafficMirrorSessionRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorSessionResult, DeleteTrafficMirrorSessionError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DeleteTrafficMirrorSession");
+        params.put("Version", "2016-11-15");
+        DeleteTrafficMirrorSessionRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DeleteTrafficMirrorSessionError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DeleteTrafficMirrorSessionResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DeleteTrafficMirrorSessionResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Deletes the specified Traffic Mirror target.

You cannot delete a Traffic Mirror target that is in use by a Traffic Mirror session.

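As with filters, deleting a target that a session still uses comes back as an error from the service; a hedged sketch (region and tmt-… ID are placeholders) of checking for that case:

use rusoto_core::Region;
use rusoto_ec2::{DeleteTrafficMirrorTargetRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DeleteTrafficMirrorTargetRequest {
        traffic_mirror_target_id: "tmt-0123456789abcdef0".to_owned(),
        ..Default::default()
    };
    if let Err(err) = client.delete_traffic_mirror_target(request).sync() {
        // EC2 rejects the call while a Traffic Mirror session references
        // the target; delete or re-point the session first.
        eprintln!("DeleteTrafficMirrorTarget failed: {}", err);
    }
}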
+    fn delete_traffic_mirror_target(
+        &self,
+        input: DeleteTrafficMirrorTargetRequest,
+    ) -> RusotoFuture<DeleteTrafficMirrorTargetResult, DeleteTrafficMirrorTargetError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DeleteTrafficMirrorTarget");
+        params.put("Version", "2016-11-15");
+        DeleteTrafficMirrorTargetRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DeleteTrafficMirrorTargetError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DeleteTrafficMirrorTargetResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DeleteTrafficMirrorTargetResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
    ///

Deletes the specified transit gateway.

fn delete_transit_gateway( &self, @@ -69582,7 +74556,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69627,7 +74601,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69673,7 +74647,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69721,7 +74695,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69823,7 +74797,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69874,7 +74848,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69922,7 +74896,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -69967,7 +74941,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70094,7 +75068,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70167,7 +75141,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70215,7 +75189,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); 
@@ -70260,7 +75234,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70305,7 +75279,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70352,7 +75326,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70400,7 +75374,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70445,7 +75419,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70490,7 +75464,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70540,7 +75514,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70585,7 +75559,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70630,7 +75604,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70675,7 +75649,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70723,7 +75697,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70768,7 +75742,7 @@ impl Ec2 
for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70813,7 +75787,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70829,7 +75803,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

fn describe_dhcp_options( &self, input: DescribeDhcpOptionsRequest, @@ -70860,7 +75834,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70910,7 +75884,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70957,7 +75931,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -70973,7 +75947,52 @@ impl Ec2 for Ec2Client { }) } - ///

Describes the specified export tasks or all your export tasks.

+ ///

Describes the specified export image tasks or all your export image tasks.

+    fn describe_export_image_tasks(
+        &self,
+        input: DescribeExportImageTasksRequest,
+    ) -> RusotoFuture<DescribeExportImageTasksResult, DescribeExportImageTasksError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DescribeExportImageTasks");
+        params.put("Version", "2016-11-15");
+        DescribeExportImageTasksRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribeExportImageTasksError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DescribeExportImageTasksResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DescribeExportImageTasksResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Describes the specified export instance tasks or all your export instance tasks.

fn describe_export_tasks( &self, input: DescribeExportTasksRequest, @@ -71004,7 +76023,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71051,7 +76070,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71096,7 +76115,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71144,7 +76163,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71192,7 +76211,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71237,7 +76256,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71285,7 +76304,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71333,7 +76352,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71378,7 +76397,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71426,7 +76445,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71474,7 +76493,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); 
let _start_document = stack.next(); @@ -71522,7 +76541,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71567,7 +76586,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71612,7 +76631,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71657,7 +76676,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71702,7 +76721,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71747,7 +76766,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71792,7 +76811,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71840,7 +76859,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71885,7 +76904,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71901,7 +76920,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes the specified instances or all of your instances.

If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

+ ///

Describes the specified instances or all of your AWS account's instances.

If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

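A sketch of driving this operation with a filter and the next_token pagination loop; the region and the running-state filter are illustrative:

use rusoto_core::Region;
use rusoto_ec2::{DescribeInstancesRequest, Ec2, Ec2Client, Filter};

fn main() {
    let client = Ec2Client::new(Region::UsWest2);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeInstancesRequest {
            filters: Some(vec![Filter {
                name: Some("instance-state-name".to_owned()),
                values: Some(vec!["running".to_owned()]),
            }]),
            next_token: next_token.clone(),
            ..Default::default()
        };
        let response = client
            .describe_instances(request)
            .sync()
            .expect("DescribeInstances failed");
        // Instances come back grouped by reservation.
        for reservation in response.reservations.unwrap_or_default() {
            for instance in reservation.instances.unwrap_or_default() {
                println!("{:?}", instance.instance_id);
            }
        }
        next_token = response.next_token;
        if next_token.is_none() {
            break;
        }
    }
}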
fn describe_instances( &self, input: DescribeInstancesRequest, @@ -71933,7 +76952,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -71978,7 +76997,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72026,7 +77045,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72072,7 +77091,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72117,7 +77136,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72162,7 +77181,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72209,7 +77228,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72225,7 +77244,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

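A minimal sketch of scoping the call to one VPC with the generated Filter type (the vpc-… ID is a placeholder):

use rusoto_core::Region;
use rusoto_ec2::{DescribeNetworkAclsRequest, Ec2, Ec2Client, Filter};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeNetworkAclsRequest {
        filters: Some(vec![Filter {
            name: Some("vpc-id".to_owned()),
            values: Some(vec!["vpc-0123456789abcdef0".to_owned()]),
        }]),
        ..Default::default()
    };
    let response = client
        .describe_network_acls(request)
        .sync()
        .expect("DescribeNetworkAcls failed");
    for acl in response.network_acls.unwrap_or_default() {
        println!("{:?} default: {:?}", acl.network_acl_id, acl.is_default);
    }
}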
fn describe_network_acls( &self, input: DescribeNetworkAclsRequest, @@ -72256,7 +77275,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72304,7 +77323,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72354,7 +77373,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72399,7 +77418,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72444,7 +77463,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72491,7 +77510,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72536,7 +77555,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72581,7 +77600,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72597,7 +77616,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes the Regions that are currently available to you. The API returns a list of all the Regions, including Regions that are disabled for your account. For information about enabling Regions for your account, see Enabling and Disabling Regions in the AWS Billing and Cost Management User Guide.

For a list of the Regions supported by Amazon EC2, see Regions and Endpoints.

+ ///

Describes the Regions that are enabled for your account, or all Regions.

For a list of the Regions supported by Amazon EC2, see Regions and Endpoints.

For information about enabling and disabling Regions for your account, see Managing AWS Regions in the AWS General Reference.

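The new wording corresponds to an AllRegions switch on the request; a sketch, assuming the regenerated DescribeRegionsRequest exposes it as all_regions and each region record carries an opt_in_status:

use rusoto_core::Region;
use rusoto_ec2::{DescribeRegionsRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeRegionsRequest {
        // Include Regions that are disabled for the account.
        all_regions: Some(true),
        ..Default::default()
    };
    let response = client
        .describe_regions(request)
        .sync()
        .expect("DescribeRegions failed");
    for region in response.regions.unwrap_or_default() {
        println!("{:?} opt-in: {:?}", region.region_name, region.opt_in_status);
    }
}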
fn describe_regions( &self, input: DescribeRegionsRequest, @@ -72629,7 +77648,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72674,7 +77693,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72722,7 +77741,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72772,7 +77791,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72822,7 +77841,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72838,7 +77857,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

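Because implicit (main-table) associations are not returned per subnet, looking up a subnet's effective table usually means filtering on association.subnet-id and falling back to the main table when nothing matches; a sketch with placeholder IDs:

use rusoto_core::Region;
use rusoto_ec2::{DescribeRouteTablesRequest, Ec2, Ec2Client, Filter};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeRouteTablesRequest {
        filters: Some(vec![Filter {
            name: Some("association.subnet-id".to_owned()),
            values: Some(vec!["subnet-0123456789abcdef0".to_owned()]),
        }]),
        ..Default::default()
    };
    let response = client
        .describe_route_tables(request)
        .sync()
        .expect("DescribeRouteTables failed");
    match response.route_tables.unwrap_or_default().first() {
        Some(table) => println!("explicit association: {:?}", table.route_table_id),
        // No explicit association: the subnet uses the VPC's main route table.
        None => println!("fall back to the VPC's main route table"),
    }
}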
fn describe_route_tables( &self, input: DescribeRouteTablesRequest, @@ -72869,7 +77888,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72919,7 +77938,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -72964,7 +77983,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73012,7 +78031,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73057,7 +78076,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73102,7 +78121,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73150,7 +78169,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73198,7 +78217,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73243,7 +78262,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73291,7 +78310,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73336,7 +78355,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); 
let _start_document = stack.next(); @@ -73381,7 +78400,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73426,7 +78445,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73471,7 +78490,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73487,7 +78506,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

+ ///

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

fn describe_subnets( &self, input: DescribeSubnetsRequest, @@ -73519,7 +78538,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73567,7 +78586,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73581,6 +78600,141 @@ impl Ec2 for Ec2Client { }) } + ///

Describes one or more Traffic Mirror filters.

+    fn describe_traffic_mirror_filters(
+        &self,
+        input: DescribeTrafficMirrorFiltersRequest,
+    ) -> RusotoFuture<DescribeTrafficMirrorFiltersResult, DescribeTrafficMirrorFiltersError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DescribeTrafficMirrorFilters");
+        params.put("Version", "2016-11-15");
+        DescribeTrafficMirrorFiltersRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribeTrafficMirrorFiltersError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DescribeTrafficMirrorFiltersResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DescribeTrafficMirrorFiltersResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror sessions are described. Alternatively, you can filter the results.

+    fn describe_traffic_mirror_sessions(
+        &self,
+        input: DescribeTrafficMirrorSessionsRequest,
+    ) -> RusotoFuture<DescribeTrafficMirrorSessionsResult, DescribeTrafficMirrorSessionsError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DescribeTrafficMirrorSessions");
+        params.put("Version", "2016-11-15");
+        DescribeTrafficMirrorSessionsRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribeTrafficMirrorSessionsError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DescribeTrafficMirrorSessionsResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DescribeTrafficMirrorSessionsResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Describes one or more Traffic Mirror targets.

+    fn describe_traffic_mirror_targets(
+        &self,
+        input: DescribeTrafficMirrorTargetsRequest,
+    ) -> RusotoFuture<DescribeTrafficMirrorTargetsResult, DescribeTrafficMirrorTargetsError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "DescribeTrafficMirrorTargets");
+        params.put("Version", "2016-11-15");
+        DescribeTrafficMirrorTargetsRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribeTrafficMirrorTargetsError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = DescribeTrafficMirrorTargetsResult::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    result = DescribeTrafficMirrorTargetsResultDeserializer::deserialize(
+                        &actual_tag_name,
+                        &mut stack,
+                    )?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
    ///

Describes one or more attachments between resources and transit gateways. By default, all attachments are described. Alternatively, you can filter the results by attachment ID, attachment state, resource ID, or resource owner.

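A sketch of the filtered form the description mentions, here narrowing by attachment state (filter names follow the EC2 API's hyphenated convention; the region is a placeholder):

use rusoto_core::Region;
use rusoto_ec2::{DescribeTransitGatewayAttachmentsRequest, Ec2, Ec2Client, Filter};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DescribeTransitGatewayAttachmentsRequest {
        filters: Some(vec![Filter {
            name: Some("state".to_owned()),
            values: Some(vec!["available".to_owned()]),
        }]),
        ..Default::default()
    };
    let response = client
        .describe_transit_gateway_attachments(request)
        .sync()
        .expect("DescribeTransitGatewayAttachments failed");
    for attachment in response.transit_gateway_attachments.unwrap_or_default() {
        println!(
            "{:?} -> {:?} ({:?})",
            attachment.transit_gateway_attachment_id,
            attachment.resource_id,
            attachment.resource_type,
        );
    }
}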
fn describe_transit_gateway_attachments( &self, @@ -73613,7 +78767,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73661,7 +78815,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73711,7 +78865,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73756,7 +78910,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73801,7 +78955,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73848,7 +79002,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73896,7 +79050,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73941,7 +79095,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -73988,7 +79142,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74033,7 +79187,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74081,7 +79235,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next(); @@ -74133,7 +79287,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74180,7 +79334,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74232,7 +79386,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74283,7 +79437,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74328,7 +79482,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74375,7 +79529,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74420,7 +79574,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74468,7 +79622,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74511,7 +79665,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74558,7 +79712,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74605,7 +79759,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74703,7 +79857,7 
@@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74745,7 +79899,7 @@ impl Ec2 for Ec2Client { }) } - ///

Disables default encryption for EBS volumes that are created in your account in the current region.

Call this API if you have enabled default encryption using EnableEbsEncryptionByDefault and want to disable default EBS encryption. Once default EBS encryption is disabled, you can still create an encrypted volume by setting encrypted to true in the API call that creates the volume.

Disabling default EBS encryption will not change the encryption status of any of your existing volumes.

+ ///

Disables EBS encryption by default for your account in the current Region.

After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.

Disabling encryption by default does not change the encryption status of your existing volumes.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

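A sketch pairing the call with GetEbsEncryptionByDefault to confirm the per-Region account flag afterwards; both requests take no parameters beyond dry_run, and existing volumes are untouched either way:

use rusoto_core::Region;
use rusoto_ec2::{
    DisableEbsEncryptionByDefaultRequest, Ec2, Ec2Client, GetEbsEncryptionByDefaultRequest,
};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    client
        .disable_ebs_encryption_by_default(DisableEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("DisableEbsEncryptionByDefault failed");
    // Read the setting back to confirm the change took effect.
    let status = client
        .get_ebs_encryption_by_default(GetEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("GetEbsEncryptionByDefault failed");
    println!("encryption by default: {:?}", status.ebs_encryption_by_default);
}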
fn disable_ebs_encryption_by_default( &self, input: DisableEbsEncryptionByDefaultRequest, @@ -74774,7 +79928,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74826,7 +79980,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74897,7 +80051,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -74945,7 +80099,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75022,7 +80176,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75068,7 +80222,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75084,7 +80238,7 @@ impl Ec2 for Ec2Client { }) } - ///

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

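The operation returns no payload, so the generated method should resolve to plain () on success; a sketch with a placeholder association ID (obtained from a prior AssociateRouteTable or DescribeRouteTables call):

use rusoto_core::Region;
use rusoto_ec2::{DisassociateRouteTableRequest, Ec2, Ec2Client};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = DisassociateRouteTableRequest {
        association_id: "rtbassoc-0123456789abcdef0".to_owned(),
        ..Default::default()
    };
    // On success the subnet falls back to the VPC's main route table.
    client
        .disassociate_route_table(request)
        .sync()
        .expect("DisassociateRouteTable failed");
}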
fn disassociate_route_table( &self, input: DisassociateRouteTableRequest, @@ -75138,7 +80292,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75188,7 +80342,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75233,7 +80387,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75249,7 +80403,7 @@ impl Ec2 for Ec2Client { }) } - ///

Enables default encryption for EBS volumes that are created in your account in the current region.

Once encryption is enabled with this action, EBS volumes that are created in your account will always be encrypted even if encryption is not specified at launch. This setting overrides the encrypted setting to true in all API calls that create EBS volumes in your account. A volume will be encrypted even if you specify encryption to be false in the API call that creates the volume.

If you do not specify a customer master key (CMK) in the API call that creates the EBS volume, then the volume is encrypted to your AWS account's managed CMK.

You can specify a CMK of your choice using ModifyEbsDefaultKmsKeyId.

Enabling encryption-by-default for EBS volumes has no effect on existing unencrypted volumes in your account. Encrypting the data in these volumes requires manual action. You can either create an encrypted snapshot of an unencrypted volume, or encrypt a copy of an unencrypted snapshot. Any volume restored from an encrypted snapshot is also encrypted. For more information, see Amazon EBS Snapshots.

After EBS encryption-by-default is enabled, you can no longer launch older-generation instance types that do not support encryption. For more information, see Supported Instance Types.

+ ///

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported Instance Types.
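A hedged usage sketch for the call documented above; it assumes rusoto 0.40-style blocking dispatch via `RusotoFuture::sync()` and credentials sourced from the default provider chain:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, EnableEbsEncryptionByDefaultRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Opt this account into EBS encryption by default, in this Region only.
    let result = client
        .enable_ebs_encryption_by_default(EnableEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("EnableEbsEncryptionByDefault failed");
    println!("encryption by default: {:?}", result.ebs_encryption_by_default);
}
```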

fn enable_ebs_encryption_by_default( &self, input: EnableEbsEncryptionByDefaultRequest, @@ -75278,7 +80432,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75330,7 +80484,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75431,7 +80585,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75477,7 +80631,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75529,7 +80683,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true) + ParserConfig::new().trim_whitespace(false) ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75576,7 +80730,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75592,6 +80746,52 @@ impl Ec2 for Ec2Client { }) } + ///

Exports an Amazon Machine Image (AMI) to a VM file. For more information, see Exporting a VM Directly from an Amazon Machine Image (AMI) in the VM Import/Export User Guide.
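A sketch of calling the new operation, assuming the request shape generated in this PR; the AMI ID, bucket, and prefix are placeholders, and `.sync()` assumes rusoto's blocking futures-0.1 dispatch:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ExportImageRequest, ExportTaskS3LocationRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ExportImageRequest {
        image_id: "ami-0123456789abcdef0".to_string(), // placeholder AMI ID
        disk_image_format: "VMDK".to_string(),         // also VHD or RAW
        s3_export_location: ExportTaskS3LocationRequest {
            s3_bucket: "my-export-bucket".to_string(), // placeholder bucket
            s3_prefix: Some("exports/".to_string()),
        },
        ..Default::default()
    };
    match client.export_image(request).sync() {
        // Task ID can be polled with DescribeExportImageTasks.
        Ok(result) => println!("export task: {:?}", result.export_image_task_id),
        Err(e) => eprintln!("ExportImage failed: {}", e),
    }
}
```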

+ fn export_image( + &self, + input: ExportImageRequest, + ) -> RusotoFuture<ExportImageResult, ExportImageError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ExportImage"); + params.put("Version", "2016-11-15"); + ExportImageRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(ExportImageError::from_response(response))), + ); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ExportImageResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = + ExportImageResultDeserializer::deserialize(&actual_tag_name, &mut stack)?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

fn export_transit_gateway_routes( &self, @@ -75621,7 +80821,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75637,6 +80837,51 @@ impl Ec2 for Ec2Client { }) } + ///

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each AWS account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.
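A sketch under the same assumptions (placeholder reservation ID; field names taken from the AWS GetCapacityReservationUsage response shape):

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, GetCapacityReservationUsageRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = GetCapacityReservationUsageRequest {
        capacity_reservation_id: "cr-0123456789abcdef0".to_string(), // placeholder
        ..Default::default()
    };
    let usage = client
        .get_capacity_reservation_usage(request)
        .sync()
        .expect("GetCapacityReservationUsage failed");
    println!(
        "total: {:?}, available: {:?}",
        usage.total_instance_count, usage.available_instance_count
    );
    // Per-account rows are only present when the reservation is shared.
    for row in usage.instance_usages.unwrap_or_default() {
        println!("{:?} uses {:?}", row.account_id, row.used_instance_count);
    }
}
```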

+ fn get_capacity_reservation_usage( + &self, + input: GetCapacityReservationUsageRequest, + ) -> RusotoFuture<GetCapacityReservationUsageResult, GetCapacityReservationUsageError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "GetCapacityReservationUsage"); + params.put("Version", "2016-11-15"); + GetCapacityReservationUsageRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(GetCapacityReservationUsageError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = GetCapacityReservationUsageResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = GetCapacityReservationUsageResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance Console Output in the Amazon Elastic Compute Cloud User Guide.
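A sketch of fetching and decoding the output; the console output comes back base64-encoded, so this leans on the `base64` crate, which is an assumption of the example rather than part of this diff:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, GetConsoleOutputRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = GetConsoleOutputRequest {
        instance_id: "i-0123456789abcdef0".to_string(), // placeholder instance ID
        latest: Some(true), // latest serial console output; Nitro-based types only
        ..Default::default()
    };
    let result = client
        .get_console_output(request)
        .sync()
        .expect("GetConsoleOutput failed");
    // The API returns the console output base64-encoded.
    if let Some(encoded) = result.output {
        let bytes = base64::decode(&encoded).expect("invalid base64");
        println!("{}", String::from_utf8_lossy(&bytes));
    }
}
```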

fn get_console_output( &self, @@ -75669,7 +80914,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75716,7 +80961,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75732,7 +80977,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes the default customer master key (CMK) that your account uses to encrypt EBS volumes if you don’t specify a CMK in the API call. You can change this default using ModifyEbsDefaultKmsKeyId.

+ ///

Describes the default customer master key (CMK) for EBS encryption by default for your account in this Region. You can change the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn get_ebs_default_kms_key_id( &self, input: GetEbsDefaultKmsKeyIdRequest, @@ -75761,7 +81006,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75777,7 +81022,7 @@ impl Ec2 for Ec2Client { }) } - ///

Describes whether default EBS encryption is enabled for your account in the current region.

+ ///

Describes whether EBS encryption by default is enabled for your account in the current Region.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
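A quick check of the flag, again as a hedged `.sync()` sketch:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, GetEbsEncryptionByDefaultRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let result = client
        .get_ebs_encryption_by_default(GetEbsEncryptionByDefaultRequest::default())
        .sync()
        .expect("GetEbsEncryptionByDefault failed");
    // `true` means volumes created in this Region are encrypted automatically.
    println!("encryption by default: {:?}", result.ebs_encryption_by_default);
}
```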

fn get_ebs_encryption_by_default( &self, input: GetEbsEncryptionByDefaultRequest, @@ -75806,7 +81051,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75854,7 +81099,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75899,7 +81144,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75947,7 +81192,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -75995,7 +81240,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76049,7 +81294,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76104,7 +81349,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76159,7 +81404,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76212,7 +81457,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true) + ParserConfig::new().trim_whitespace(false) ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76257,7 +81502,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76303,7 +81548,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76351,7 +81596,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76397,7 +81642,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76445,7 +81690,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76488,7 +81733,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76533,7 +81778,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76549,7 +81794,7 @@ impl Ec2 for Ec2Client { }) } - ///

Changes the customer master key (CMK) that your account uses to encrypt EBS volumes if you don't specify a CMK in the API call.

By default, your account has an AWS-managed CMK that is used for encrypting an EBS volume when no CMK is specified in the API call that creates the volume. By calling this API, you can specify a customer-managed CMK to use in place of the AWS-managed CMK.

Note: Deleting or disabling the CMK that you have specified to act as your default CMK will result in instance-launch failures.

+ ///

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

fn modify_ebs_default_kms_key_id( &self, input: ModifyEbsDefaultKmsKeyIdRequest, @@ -76578,7 +81823,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76594,7 +81839,7 @@ impl Ec2 for Ec2Client { }) } - ///

Modifies the specified EC2 Fleet.

While the EC2 Fleet is being modified, it is in the modifying state.

+ ///

Modifies the specified EC2 Fleet.

You can only modify an EC2 Fleet request of type maintain.

While the EC2 Fleet is being modified, it is in the modifying state.

To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches the additional Spot Instances according to the allocation strategy for the EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the EC2 Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 Fleet cancels any open requests that exceed the new target capacity. You can request that the EC2 Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the EC2 Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the EC2 Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your EC2 Fleet for now, but will use it again later, you can set the target capacity to 0.
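A sketch of scaling a maintain-type fleet by rewriting its target capacity; the fleet ID is a placeholder and the request shape follows the structs generated in this file:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyFleetRequest, TargetCapacitySpecificationRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = ModifyFleetRequest {
        fleet_id: "fleet-0123456789abcdef0".to_string(), // placeholder fleet ID
        target_capacity_specification: TargetCapacitySpecificationRequest {
            total_target_capacity: 10, // scale the fleet up or down to 10 units
            ..Default::default()
        },
        ..Default::default()
    };
    let result = client.modify_fleet(request).sync().expect("ModifyFleet failed");
    println!("accepted: {:?}", result.return_);
}
```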

fn modify_fleet( &self, input: ModifyFleetRequest, @@ -76626,7 +81871,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76669,7 +81914,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76717,7 +81962,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76872,7 +82117,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76921,7 +82166,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -76966,7 +82211,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77011,7 +82256,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77058,7 +82303,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77130,7 +82375,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77146,7 +82391,7 @@ impl Ec2 for Ec2Client { }) } - ///

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

+ ///

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single operation. If you need to both add and remove account IDs for a snapshot, you must use multiple operations. You can make up to 500 modifications to a snapshot in a single operation.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.
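A sketch of the add-only case described above (placeholder snapshot and account IDs); the operation returns no payload, so success is just `Ok(())`:

```rust
use rusoto_core::Region;
use rusoto_ec2::{
    CreateVolumePermission, CreateVolumePermissionModifications, Ec2, Ec2Client,
    ModifySnapshotAttributeRequest,
};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Grant one AWS account permission to create volumes from the snapshot.
    let request = ModifySnapshotAttributeRequest {
        snapshot_id: "snap-0123456789abcdef0".to_string(), // placeholder
        create_volume_permission: Some(CreateVolumePermissionModifications {
            add: Some(vec![CreateVolumePermission {
                user_id: Some("123456789012".to_string()), // placeholder account ID
                ..Default::default()
            }]),
            ..Default::default()
        }),
        ..Default::default()
    };
    client
        .modify_snapshot_attribute(request)
        .sync()
        .expect("ModifySnapshotAttribute failed");
}
```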

fn modify_snapshot_attribute( &self, input: ModifySnapshotAttributeRequest, @@ -77171,7 +82416,7 @@ impl Ec2 for Ec2Client { }) } - ///

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot pools.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

+ ///

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the Spot Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

fn modify_spot_fleet_request( &self, input: ModifySpotFleetRequestRequest, @@ -77200,7 +82445,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77241,6 +82486,149 @@ impl Ec2 for Ec2Client { }) } + ///

Allows or restricts mirroring network services.

By default, Amazon DNS network services are not eligible for Traffic Mirror. Use AddNetworkServices to add network services to a Traffic Mirror filter. When a network service is added to the Traffic Mirror filter, all traffic related to that network service will be mirrored. When you no longer want to mirror network services, use RemoveNetworkServices to remove the network services from the Traffic Mirror filter.

For information about filter rule properties, see Network Services in the Traffic Mirroring User Guide.
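A sketch of enabling DNS mirroring on a filter; the filter ID is a placeholder, and amazon-dns is the only network service value the API accepts at the time of this change:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyTrafficMirrorFilterNetworkServicesRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Start mirroring Amazon DNS traffic through this filter.
    let request = ModifyTrafficMirrorFilterNetworkServicesRequest {
        traffic_mirror_filter_id: "tmf-0123456789abcdef0".to_string(), // placeholder
        add_network_services: Some(vec!["amazon-dns".to_string()]),
        ..Default::default()
    };
    let result = client
        .modify_traffic_mirror_filter_network_services(request)
        .sync()
        .expect("ModifyTrafficMirrorFilterNetworkServices failed");
    println!("filter: {:?}", result.traffic_mirror_filter);
}
```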

+ fn modify_traffic_mirror_filter_network_services( + &self, + input: ModifyTrafficMirrorFilterNetworkServicesRequest, + ) -> RusotoFuture< + ModifyTrafficMirrorFilterNetworkServicesResult, + ModifyTrafficMirrorFilterNetworkServicesError, + > { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ModifyTrafficMirrorFilterNetworkServices"); + params.put("Version", "2016-11-15"); + ModifyTrafficMirrorFilterNetworkServicesRequestSerializer::serialize( + &mut params, + "", + &input, + ); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(ModifyTrafficMirrorFilterNetworkServicesError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ModifyTrafficMirrorFilterNetworkServicesResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = + ModifyTrafficMirrorFilterNetworkServicesResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Modifies the specified Traffic Mirror rule.

DestinationCidrBlock and SourceCidrBlock must both be an IPv4 range or an IPv6 range.

+ fn modify_traffic_mirror_filter_rule( + &self, + input: ModifyTrafficMirrorFilterRuleRequest, + ) -> RusotoFuture<ModifyTrafficMirrorFilterRuleResult, ModifyTrafficMirrorFilterRuleError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ModifyTrafficMirrorFilterRule"); + params.put("Version", "2016-11-15"); + ModifyTrafficMirrorFilterRuleRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(ModifyTrafficMirrorFilterRuleError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ModifyTrafficMirrorFilterRuleResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = ModifyTrafficMirrorFilterRuleResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Modifies a Traffic Mirror session.

+ fn modify_traffic_mirror_session( + &self, + input: ModifyTrafficMirrorSessionRequest, + ) -> RusotoFuture<ModifyTrafficMirrorSessionResult, ModifyTrafficMirrorSessionError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ModifyTrafficMirrorSession"); + params.put("Version", "2016-11-15"); + ModifyTrafficMirrorSessionRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(ModifyTrafficMirrorSessionError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ModifyTrafficMirrorSessionResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = ModifyTrafficMirrorSessionResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Modifies the specified VPC attachment.

fn modify_transit_gateway_vpc_attachment( &self, @@ -77273,7 +82661,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77289,7 +82677,7 @@ impl Ec2 for Ec2Client { }) } - ///

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

+ ///

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.
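A sketch of growing a volume in place (placeholder volume ID); after the call you would still need to extend the file system, as the text above notes:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ModifyVolumeRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Grow the volume to 200 GiB without detaching it (current-generation instances).
    let request = ModifyVolumeRequest {
        volume_id: "vol-0123456789abcdef0".to_string(), // placeholder volume ID
        size: Some(200),
        ..Default::default()
    };
    let result = client.modify_volume(request).sync().expect("ModifyVolume failed");
    // Track progress with DescribeVolumesModifications or CloudWatch Events.
    println!("modification state: {:?}", result.volume_modification);
}
```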

fn modify_volume( &self, input: ModifyVolumeRequest, @@ -77321,7 +82709,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77388,7 +82776,7 @@ impl Ec2 for Ec2Client { }) } - ///

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

+ ///

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

fn modify_vpc_endpoint( &self, input: ModifyVpcEndpointRequest, @@ -77420,7 +82808,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77474,7 +82862,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77525,7 +82913,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77541,7 +82929,7 @@ impl Ec2 for Ec2Client { }) } - ///

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

+ ///

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

fn modify_vpc_endpoint_service_permissions( &self, input: ModifyVpcEndpointServicePermissionsRequest, @@ -77575,7 +82963,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77623,7 +83011,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77671,7 +83059,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77687,7 +83075,7 @@ impl Ec2 for Ec2Client { }) } - ///

Modifies the target gateway of a AWS Site-to-Site VPN connection. The following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints.

+ ///

Modifies the target gateway of an AWS Site-to-Site VPN connection. The following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints.

fn modify_vpn_connection( &self, input: ModifyVpnConnectionRequest, @@ -77718,7 +83106,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77734,6 +83122,96 @@ impl Ec2 for Ec2Client { }) } + ///

Modifies the VPN tunnel endpoint certificate.

+ fn modify_vpn_tunnel_certificate( + &self, + input: ModifyVpnTunnelCertificateRequest, + ) -> RusotoFuture<ModifyVpnTunnelCertificateResult, ModifyVpnTunnelCertificateError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ModifyVpnTunnelCertificate"); + params.put("Version", "2016-11-15"); + ModifyVpnTunnelCertificateRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(ModifyVpnTunnelCertificateError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ModifyVpnTunnelCertificateResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = ModifyVpnTunnelCertificateResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + + ///

Modifies the options for a VPN tunnel in an AWS Site-to-Site VPN connection. You can modify multiple options for a tunnel in a single request, but you can only modify one tunnel at a time. For more information, see Site-to-Site VPN Tunnel Options for Your Site-to-Site VPN Connection in the AWS Site-to-Site VPN User Guide.

+ fn modify_vpn_tunnel_options( + &self, + input: ModifyVpnTunnelOptionsRequest, + ) -> RusotoFuture<ModifyVpnTunnelOptionsResult, ModifyVpnTunnelOptionsError> { + let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); + let mut params = Params::new(); + + params.put("Action", "ModifyVpnTunnelOptions"); + params.put("Version", "2016-11-15"); + ModifyVpnTunnelOptionsRequestSerializer::serialize(&mut params, "", &input); + request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap())); + request.set_content_type("application/x-www-form-urlencoded".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if !response.status.is_success() { + return Box::new(response.buffer().from_err().and_then(|response| { + Err(ModifyVpnTunnelOptionsError::from_response(response)) + })); + } + + Box::new(response.buffer().from_err().and_then(move |response| { + let result; + + if response.body.is_empty() { + result = ModifyVpnTunnelOptionsResult::default(); + } else { + let reader = EventReader::new_with_config( + response.body.as_ref(), + ParserConfig::new().trim_whitespace(false), + ); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + let _start_document = stack.next(); + let actual_tag_name = peek_at_name(&mut stack)?; + result = ModifyVpnTunnelOptionsResultDeserializer::deserialize( + &actual_tag_name, + &mut stack, + )?; + } + // parse non-payload + Ok(result) + })) + }) + } + ///

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see UnmonitorInstances.
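A sketch of turning on detailed monitoring for one instance (placeholder ID), under the same rusoto 0.40 `.sync()` assumptions as the earlier examples:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, MonitorInstancesRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = MonitorInstancesRequest {
        instance_ids: vec!["i-0123456789abcdef0".to_string()], // placeholder instance ID
        ..Default::default()
    };
    let result = client
        .monitor_instances(request)
        .sync()
        .expect("MonitorInstances failed");
    for m in result.instance_monitorings.unwrap_or_default() {
        println!("{:?}: {:?}", m.instance_id, m.monitoring);
    }
}
```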

fn monitor_instances( &self, @@ -77766,7 +83244,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77814,7 +83292,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77862,7 +83340,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77907,7 +83385,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -77955,7 +83433,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78000,7 +83478,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78076,7 +83554,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78122,7 +83600,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78167,7 +83645,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78212,7 +83690,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78288,7 +83766,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ 
-78336,7 +83814,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78352,7 +83830,7 @@ impl Ec2 for Ec2Client { }) } - ///

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

+ ///

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

fn replace_network_acl_association( &self, input: ReplaceNetworkAclAssociationRequest, @@ -78381,7 +83859,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78397,7 +83875,7 @@ impl Ec2 for Ec2Client { }) } - ///

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

+ ///

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

fn replace_network_acl_entry( &self, input: ReplaceNetworkAclEntryRequest, @@ -78422,7 +83900,7 @@ impl Ec2 for Ec2Client { }) } - ///

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

+ ///

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.
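A sketch of swapping the target of the default route (placeholder IDs); exactly one target parameter may be supplied, per the description above:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ReplaceRouteRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Point the default route at a NAT gateway instead of its previous target.
    let request = ReplaceRouteRequest {
        route_table_id: "rtb-0123456789abcdef0".to_string(), // placeholder
        destination_cidr_block: Some("0.0.0.0/0".to_string()),
        nat_gateway_id: Some("nat-0123456789abcdef0".to_string()), // placeholder
        ..Default::default()
    };
    // ReplaceRoute has no response payload; Ok(()) means the route was swapped.
    client.replace_route(request).sync().expect("ReplaceRoute failed");
}
```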

fn replace_route(&self, input: ReplaceRouteRequest) -> RusotoFuture<(), ReplaceRouteError> { let mut request = SignedRequest::new("POST", "ec2", &self.region, "/"); let mut params = Params::new(); @@ -78447,7 +83925,7 @@ impl Ec2 for Ec2Client { }) } - ///

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

+ ///

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

fn replace_route_table_association( &self, input: ReplaceRouteTableAssociationRequest, @@ -78476,7 +83954,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78521,7 +83999,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78564,7 +84042,7 @@ impl Ec2 for Ec2Client { }) } - ///

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

+ ///

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot Instance pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

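As an illustrative sketch (not from this diff), a fleet request varying only by instance type could be assembled as below; the AMI ID and IAM role ARN are placeholders, and the field names assume the generated `SpotFleetRequestConfigData` mirrors the EC2 API's IamFleetRole, TargetCapacity, and LaunchSpecifications members:

```rust
use rusoto_core::Region;
use rusoto_ec2::{
    Ec2, Ec2Client, RequestSpotFleetRequest, SpotFleetLaunchSpecification,
    SpotFleetRequestConfigData,
};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // Two launch specifications that differ only in instance type; by
    // default the fleet draws from the cheapest matching Spot pool.
    let launch_specs = vec!["m5.large", "m4.large"]
        .into_iter()
        .map(|instance_type| SpotFleetLaunchSpecification {
            image_id: Some("ami-0123456789abcdef0".to_string()), // placeholder
            instance_type: Some(instance_type.to_string()),
            ..Default::default()
        })
        .collect();
    let request = RequestSpotFleetRequest {
        spot_fleet_request_config: SpotFleetRequestConfigData {
            iam_fleet_role: "arn:aws:iam::123456789012:role/fleet-role".to_string(),
            target_capacity: 4,
            launch_specifications: Some(launch_specs),
            ..Default::default()
        },
        ..Default::default()
    };
    match client.request_spot_fleet(request).sync() {
        Ok(response) => println!("{:?}", response.spot_fleet_request_id),
        Err(e) => eprintln!("RequestSpotFleet failed: {}", e),
    }
}
```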
fn request_spot_fleet( &self, input: RequestSpotFleetRequest, @@ -78596,7 +84074,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78643,7 +84121,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78659,7 +84137,7 @@ impl Ec2 for Ec2Client { }) } - ///

Resets the account's default customer master key (CMK) to the account's AWS-managed default CMK. This default CMK is used to encrypt EBS volumes when you have enabled EBS encryption by default without specifying a CMK in the API call. If you have not enabled encryption by default, then this CMK is used when you set the Encrypted parameter to true without specifying a custom CMK in the API call.

Call this API if you have modified the default CMK that is used for encrypting your EBS volumes using ModifyEbsDefaultKmsKeyId and you want to reset it to the AWS-managed default CMK. After resetting, you can continue to provide a CMK of your choice in the API call that creates the volume. However, if no CMK is specified, your account will encrypt the volume with the AWS-managed default CMK.

+ ///

Resets the default customer master key (CMK) for EBS encryption for your account in this Region to the AWS managed CMK for EBS.

After resetting the default CMK to the AWS managed CMK, you can continue to encrypt by a customer managed CMK by specifying it when you create the volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

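A small sketch of the reset call; the request has no required fields, and the result type is assumed to expose the now-active key as `kms_key_id`:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, ResetEbsDefaultKmsKeyIdRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    // An empty request resets the Region's default CMK for EBS encryption.
    let request = ResetEbsDefaultKmsKeyIdRequest::default();
    match client.reset_ebs_default_kms_key_id(request).sync() {
        Ok(result) => println!("default CMK is now {:?}", result.kms_key_id),
        Err(e) => eprintln!("ResetEbsDefaultKmsKeyId failed: {}", e),
    }
}
```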
fn reset_ebs_default_kms_key_id( &self, input: ResetEbsDefaultKmsKeyIdRequest, @@ -78688,7 +84166,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78733,7 +84211,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78880,7 +84358,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -78925,7 +84403,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79023,7 +84501,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79065,7 +84543,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79110,7 +84588,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79126,6 +84604,31 @@ impl Ec2 for Ec2Client { }) } + ///

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic Interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

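A usage sketch for the new operation (the instance ID is a placeholder); `SendDiagnosticInterruptRequest` only requires the instance ID, and a successful call returns no payload:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, SendDiagnosticInterruptRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = SendDiagnosticInterruptRequest {
        instance_id: "i-0123456789abcdef0".to_string(), // placeholder ID
        ..Default::default()
    };
    // On success the target instance receives an NMI and typically panics.
    match client.send_diagnostic_interrupt(request).sync() {
        Ok(()) => println!("diagnostic interrupt sent"),
        Err(e) => eprintln!("SendDiagnosticInterrupt failed: {}", e),
    }
}
```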
+    fn send_diagnostic_interrupt(
+        &self,
+        input: SendDiagnosticInterruptRequest,
+    ) -> RusotoFuture<(), SendDiagnosticInterruptError> {
+        let mut request = SignedRequest::new("POST", "ec2", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "SendDiagnosticInterrupt");
+        params.put("Version", "2016-11-15");
+        SendDiagnosticInterruptRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(SendDiagnosticInterruptError::from_response(response))
+                }));
+            }
+
+            Box::new(future::ok(::std::mem::drop(response)))
+        })
+    }
+
     ///

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time.

Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

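A minimal start-up sketch; the instance ID is a placeholder, and the result struct is assumed to report state transitions via `starting_instances`:

```rust
use rusoto_core::Region;
use rusoto_ec2::{Ec2, Ec2Client, StartInstancesRequest};

fn main() {
    let client = Ec2Client::new(Region::UsEast1);
    let request = StartInstancesRequest {
        instance_ids: vec!["i-0123456789abcdef0".to_string()], // placeholder
        ..Default::default()
    };
    match client.start_instances(request).sync() {
        // Each entry holds the previous and current instance state.
        Ok(result) => println!("{:?}", result.starting_instances),
        Err(e) => eprintln!("StartInstances failed: {}", e),
    }
}
```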
fn start_instances( &self, @@ -79158,7 +84661,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79206,7 +84709,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79249,7 +84752,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79297,7 +84800,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79342,7 +84845,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79415,7 +84918,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79467,7 +84970,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79520,7 +85023,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -79569,7 +85072,7 @@ impl Ec2 for Ec2Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/ec2/src/lib.rs b/rusoto/services/ec2/src/lib.rs index e41008eceac..7cf192e018f 100644 --- a/rusoto/services/ec2/src/lib.rs +++ b/rusoto/services/ec2/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.

To learn more about Amazon EC2, Amazon EBS, and Amazon VPC, see the following resources:

+//!

Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.

To learn more, see the following resources:

//! //! If you're using the service, you're probably looking for [Ec2Client](struct.Ec2Client.html) and [Ec2](trait.Ec2.html). diff --git a/rusoto/services/ecr/Cargo.toml b/rusoto/services/ecr/Cargo.toml index 0cb8a5230f8..44c3af15a36 100644 --- a/rusoto/services/ecr/Cargo.toml +++ b/rusoto/services/ecr/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ecr" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ecr/README.md b/rusoto/services/ecr/README.md index 1b5840316bb..37b6a773285 100644 --- a/rusoto/services/ecr/README.md +++ b/rusoto/services/ecr/README.md @@ -23,9 +23,16 @@ To use `rusoto_ecr` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_ecr = "0.40.0" +rusoto_ecr = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ecr/src/custom/mod.rs b/rusoto/services/ecr/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ecr/src/custom/mod.rs +++ b/rusoto/services/ecr/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ecr/src/generated.rs b/rusoto/services/ecr/src/generated.rs index e29370cb4fe..00ae4510635 100644 --- a/rusoto/services/ecr/src/generated.rs +++ b/rusoto/services/ecr/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

An object representing authorization data for an Amazon ECR registry.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthorizationData { ///

A base64-encoded string that contains authorization data for the specified Amazon ECR registry. When the string is decoded, it is presented in the format user:password for private registry authentication using docker login.

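A sketch of fetching and decoding such a token, assuming the `base64` crate is available as a dependency; the decoded string splits into a user name and a password suitable for `docker login`:

```rust
use rusoto_core::Region;
use rusoto_ecr::{Ecr, EcrClient, GetAuthorizationTokenRequest};

fn main() {
    let client = EcrClient::new(Region::UsEast1);
    let response = client
        .get_authorization_token(GetAuthorizationTokenRequest::default())
        .sync()
        .expect("GetAuthorizationToken failed");
    for data in response.authorization_data.unwrap_or_default() {
        if let Some(token) = data.authorization_token {
            // Decoding yields "user:password" for private registry auth.
            let decoded = base64::decode(&token).expect("invalid base64");
            let creds = String::from_utf8(decoded).expect("invalid UTF-8");
            let user = creds.splitn(2, ':').next().unwrap_or("");
            println!("user: {}, endpoint: {:?}", user, data.proxy_endpoint);
        }
    }
}
```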
#[serde(rename = "authorizationToken")] @@ -57,7 +56,7 @@ pub struct BatchCheckLayerAvailabilityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchCheckLayerAvailabilityResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -85,7 +84,7 @@ pub struct BatchDeleteImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteImageResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -116,7 +115,7 @@ pub struct BatchGetImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetImageResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -146,7 +145,7 @@ pub struct CompleteLayerUploadRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompleteLayerUploadResponse { ///

The sha256 digest of the image layer.

#[serde(rename = "layerDigest")] @@ -168,17 +167,21 @@ pub struct CompleteLayerUploadResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateRepositoryRequest { + ///

The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used, which allows image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable, which prevents them from being overwritten.

+ #[serde(rename = "imageTagMutability")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_tag_mutability: Option, ///

The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app).

#[serde(rename = "repositoryName")] pub repository_name: String, - ///

+ ///

The metadata that you apply to the repository to help you categorize and organize your repositories. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum length of 128 characters, and tag values can have a maximum length of 256 characters.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRepositoryResponse { ///

The repository that was created.

#[serde(rename = "repository")] @@ -198,7 +201,7 @@ pub struct DeleteLifecyclePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLifecyclePolicyResponse { ///

The time stamp of the last time that the lifecycle policy was run.

#[serde(rename = "lastEvaluatedAt")] @@ -230,7 +233,7 @@ pub struct DeleteRepositoryPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRepositoryPolicyResponse { ///

The JSON repository policy that was deleted from the repository.

#[serde(rename = "policyText")] @@ -262,7 +265,7 @@ pub struct DeleteRepositoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRepositoryResponse { ///

The repository that was deleted.

#[serde(rename = "repository")] @@ -301,13 +304,13 @@ pub struct DescribeImagesRequest { #[serde(rename = "registryId")] #[serde(skip_serializing_if = "Option::is_none")] pub registry_id: Option, - ///

A list of repositories to describe.

+ ///

The repository that contains the images to describe.

#[serde(rename = "repositoryName")] pub repository_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeImagesResponse { ///

A list of ImageDetail objects that contain data about the image.

#[serde(rename = "imageDetails")] @@ -340,7 +343,7 @@ pub struct DescribeRepositoriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRepositoriesResponse { ///

The nextToken value to include in a future DescribeRepositories request. When the results of a DescribeRepositories request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -361,7 +364,7 @@ pub struct GetAuthorizationTokenRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAuthorizationTokenResponse { ///

A list of authorization token data objects that correspond to the registryIds values in the request.

#[serde(rename = "authorizationData")] @@ -384,7 +387,7 @@ pub struct GetDownloadUrlForLayerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDownloadUrlForLayerResponse { ///

The pre-signed Amazon S3 download URL for the requested layer.

#[serde(rename = "downloadUrl")] @@ -424,7 +427,7 @@ pub struct GetLifecyclePolicyPreviewRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLifecyclePolicyPreviewResponse { ///

The JSON lifecycle policy text.

#[serde(rename = "lifecyclePolicyText")] @@ -468,7 +471,7 @@ pub struct GetLifecyclePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLifecyclePolicyResponse { ///

The time stamp of the last time that the lifecycle policy was run.

#[serde(rename = "lastEvaluatedAt")] @@ -500,7 +503,7 @@ pub struct GetRepositoryPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRepositoryPolicyResponse { ///

The JSON repository policy text associated with the repository.

#[serde(rename = "policyText")] @@ -518,7 +521,7 @@ pub struct GetRepositoryPolicyResponse { ///

An object representing an Amazon ECR image.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Image { ///

An object containing the image tag and image digest associated with an image.

#[serde(rename = "imageId")] @@ -540,7 +543,7 @@ pub struct Image { ///

An object that describes an image returned by a DescribeImages operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageDetail { ///

The sha256 digest of the image manifest.

#[serde(rename = "imageDigest")] @@ -570,7 +573,7 @@ pub struct ImageDetail { ///

An object representing an Amazon ECR image failure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageFailure { ///

The code associated with the failure.

#[serde(rename = "failureCode")] @@ -611,7 +614,7 @@ pub struct InitiateLayerUploadRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateLayerUploadResponse { ///

The size, in bytes, that Amazon ECR expects future layer part uploads to be.

#[serde(rename = "partSize")] @@ -625,7 +628,7 @@ pub struct InitiateLayerUploadResponse { ///

An object representing an Amazon ECR image layer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Layer { ///

The availability status of the image layer.

#[serde(rename = "layerAvailability")] @@ -647,7 +650,7 @@ pub struct Layer { ///

An object representing an Amazon ECR image layer failure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LayerFailure { ///

The failure code associated with the failure.

#[serde(rename = "failureCode")] @@ -674,7 +677,7 @@ pub struct LifecyclePolicyPreviewFilter { ///

The result of the lifecycle policy preview.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LifecyclePolicyPreviewResult { ///

The type of action to be taken.

#[serde(rename = "action")] @@ -700,7 +703,7 @@ pub struct LifecyclePolicyPreviewResult { ///

The summary of the lifecycle policy preview request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LifecyclePolicyPreviewSummary { ///

The number of expiring images.

#[serde(rename = "expiringImageTotalCount")] @@ -710,7 +713,7 @@ pub struct LifecyclePolicyPreviewSummary { ///

The type of action to be taken.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LifecyclePolicyRuleAction { ///

The type of action to be taken.

#[serde(rename = "type")] @@ -751,7 +754,7 @@ pub struct ListImagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListImagesResponse { ///

The list of image IDs for the requested repository.

#[serde(rename = "imageIds")] @@ -771,7 +774,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The tags for the resource.

#[serde(rename = "tags")] @@ -798,7 +801,7 @@ pub struct PutImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutImageResponse { ///

Details of the image uploaded.

#[serde(rename = "image")] @@ -806,6 +809,37 @@ pub struct PutImageResponse { pub image: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutImageTagMutabilityRequest { + ///

The tag mutability setting for the repository. If MUTABLE is specified, image tags can be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable, which prevents them from being overwritten.

+ #[serde(rename = "imageTagMutability")] + pub image_tag_mutability: String, + ///

The AWS account ID associated with the registry that contains the repository in which to update the image tag mutability settings. If you do not specify a registry, the default registry is assumed.

+ #[serde(rename = "registryId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub registry_id: Option, + ///

The name of the repository in which to update the image tag mutability settings.

+ #[serde(rename = "repositoryName")] + pub repository_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutImageTagMutabilityResponse { + ///

The image tag mutability setting for the repository.

+ #[serde(rename = "imageTagMutability")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_tag_mutability: Option, + ///

The registry ID associated with the request.

+ #[serde(rename = "registryId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub registry_id: Option, + ///

The repository name associated with the request.

+ #[serde(rename = "repositoryName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub repository_name: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutLifecyclePolicyRequest { ///

The JSON repository policy text to apply to the repository.

@@ -821,7 +855,7 @@ pub struct PutLifecyclePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutLifecyclePolicyResponse { ///

The JSON repository policy text.

#[serde(rename = "lifecyclePolicyText")] @@ -839,12 +873,16 @@ pub struct PutLifecyclePolicyResponse { ///

An object representing a repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Repository { ///

The date and time, in JavaScript date format, when the repository was created.

#[serde(rename = "createdAt")] #[serde(skip_serializing_if = "Option::is_none")] pub created_at: Option, + ///

The tag mutability setting for the repository.

+ #[serde(rename = "imageTagMutability")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_tag_mutability: Option, ///

The AWS account ID associated with the registry that contains the repository.

#[serde(rename = "registryId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -869,7 +907,7 @@ pub struct SetRepositoryPolicyRequest { #[serde(rename = "force")] #[serde(skip_serializing_if = "Option::is_none")] pub force: Option, - ///

The JSON repository policy text to apply to the repository.

+ ///

The JSON repository policy text to apply to the repository. For more information, see Amazon ECR Repository Policy Examples in the Amazon Elastic Container Registry User Guide.

#[serde(rename = "policyText")] pub policy_text: String, ///

The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed.

@@ -882,7 +920,7 @@ pub struct SetRepositoryPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetRepositoryPolicyResponse { ///

The JSON repository policy text applied to the repository.

#[serde(rename = "policyText")] @@ -914,7 +952,7 @@ pub struct StartLifecyclePolicyPreviewRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartLifecyclePolicyPreviewResponse { ///

The JSON repository policy text.

#[serde(rename = "lifecyclePolicyText")] @@ -958,7 +996,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -972,7 +1010,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1004,7 +1042,7 @@ pub struct UploadLayerPartRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UploadLayerPartResponse { ///

The integer value of the last byte received in the request.

#[serde(rename = "lastByteReceived")] @@ -1255,7 +1293,7 @@ pub enum CreateRepositoryError { InvalidParameter(String), ///

An invalid parameter has been specified. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

InvalidTagParameter(String), - ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

+ ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

LimitExceeded(String), ///

The specified repository already exists in the specified registry.

RepositoryAlreadyExists(String), @@ -2001,11 +2039,13 @@ impl Error for ListTagsForResourceError { pub enum PutImageError { ///

The specified image has already been pushed, and there were no changes to the manifest or image tag after the last push.

ImageAlreadyExists(String), + ///

The specified image is tagged with a tag that already exists. The repository is configured for tag immutability.

+ ImageTagAlreadyExists(String), ///

The specified parameter is invalid. Review the available parameters for the API request.

InvalidParameter(String), ///

The specified layers could not be found, or the specified layer is not valid for this repository.

LayersNotFound(String), - ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

+ ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

LimitExceeded(String), ///

The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

RepositoryNotFound(String), @@ -2020,6 +2060,9 @@ impl PutImageError { "ImageAlreadyExistsException" => { return RusotoError::Service(PutImageError::ImageAlreadyExists(err.msg)) } + "ImageTagAlreadyExistsException" => { + return RusotoError::Service(PutImageError::ImageTagAlreadyExists(err.msg)) + } "InvalidParameterException" => { return RusotoError::Service(PutImageError::InvalidParameter(err.msg)) } @@ -2049,6 +2092,7 @@ impl Error for PutImageError { fn description(&self) -> &str { match *self { PutImageError::ImageAlreadyExists(ref cause) => cause, + PutImageError::ImageTagAlreadyExists(ref cause) => cause, PutImageError::InvalidParameter(ref cause) => cause, PutImageError::LayersNotFound(ref cause) => cause, PutImageError::LimitExceeded(ref cause) => cause, @@ -2057,6 +2101,55 @@ impl Error for PutImageError { } } } +/// Errors returned by PutImageTagMutability +#[derive(Debug, PartialEq)] +pub enum PutImageTagMutabilityError { + ///

The specified parameter is invalid. Review the available parameters for the API request.

+ InvalidParameter(String), + ///

The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

+ RepositoryNotFound(String), + ///

These errors are usually caused by a server-side issue.

+ Server(String), +} + +impl PutImageTagMutabilityError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidParameterException" => { + return RusotoError::Service(PutImageTagMutabilityError::InvalidParameter( + err.msg, + )) + } + "RepositoryNotFoundException" => { + return RusotoError::Service(PutImageTagMutabilityError::RepositoryNotFound( + err.msg, + )) + } + "ServerException" => { + return RusotoError::Service(PutImageTagMutabilityError::Server(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutImageTagMutabilityError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutImageTagMutabilityError { + fn description(&self) -> &str { + match *self { + PutImageTagMutabilityError::InvalidParameter(ref cause) => cause, + PutImageTagMutabilityError::RepositoryNotFound(ref cause) => cause, + PutImageTagMutabilityError::Server(ref cause) => cause, + } + } +} /// Errors returned by PutLifecyclePolicy #[derive(Debug, PartialEq)] pub enum PutLifecyclePolicyError { @@ -2341,7 +2434,7 @@ pub enum UploadLayerPartError { InvalidLayerPart(String), ///

The specified parameter is invalid. Review the available parameters for the API request.

InvalidParameter(String), - ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

+ ///

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

LimitExceeded(String), ///

The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

RepositoryNotFound(String), @@ -2510,13 +2603,19 @@ pub trait Ecr { ///

Creates or updates the image manifest and tags associated with an image.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

fn put_image(&self, input: PutImageRequest) -> RusotoFuture; - ///

Creates or updates a lifecycle policy. For information about lifecycle policy syntax, see Lifecycle Policy Template.

+ ///

Updates the image tag mutability settings for a repository.

+    fn put_image_tag_mutability(
+        &self,
+        input: PutImageTagMutabilityRequest,
+    ) -> RusotoFuture<PutImageTagMutabilityResponse, PutImageTagMutabilityError>;
+
+    ///

Creates or updates a lifecycle policy. For information about lifecycle policy syntax, see Lifecycle Policy Template.

fn put_lifecycle_policy( &self, input: PutLifecyclePolicyRequest, ) -> RusotoFuture; - ///

Applies a repository policy on a specified repository to control access permissions.

+ ///

Applies a repository policy on a specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

fn set_repository_policy( &self, input: SetRepositoryPolicyRequest, @@ -2558,10 +2657,7 @@ impl EcrClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> EcrClient { - EcrClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2575,10 +2671,14 @@ impl EcrClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - EcrClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EcrClient { + EcrClient { client, region } } } @@ -3173,7 +3273,38 @@ impl Ecr for EcrClient { }) } - ///

Creates or updates a lifecycle policy. For information about lifecycle policy syntax, see Lifecycle Policy Template.

+ ///

Updates the image tag mutability settings for a repository.

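A usage sketch for the new operation; the request fields match the `PutImageTagMutabilityRequest` struct added above, and the repository name is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_ecr::{Ecr, EcrClient, PutImageTagMutabilityRequest};

fn main() {
    let client = EcrClient::new(Region::UsEast1);
    let request = PutImageTagMutabilityRequest {
        repository_name: "project-a/nginx-web-app".to_string(), // placeholder
        // IMMUTABLE prevents existing image tags from being overwritten.
        image_tag_mutability: "IMMUTABLE".to_string(),
        registry_id: None, // default registry
    };
    match client.put_image_tag_mutability(request).sync() {
        Ok(response) => println!("{:?}", response.image_tag_mutability),
        Err(e) => eprintln!("PutImageTagMutability failed: {}", e),
    }
}
```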
+    fn put_image_tag_mutability(
+        &self,
+        input: PutImageTagMutabilityRequest,
+    ) -> RusotoFuture<PutImageTagMutabilityResponse, PutImageTagMutabilityError> {
+        let mut request = SignedRequest::new("POST", "ecr", &self.region, "/");
+        request.set_endpoint_prefix("api.ecr".to_string());
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header(
+            "x-amz-target",
+            "AmazonEC2ContainerRegistry_V20150921.PutImageTagMutability",
+        );
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<PutImageTagMutabilityResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(PutImageTagMutabilityError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    ///

Creates or updates a lifecycle policy. For information about lifecycle policy syntax, see Lifecycle Policy Template.

fn put_lifecycle_policy( &self, input: PutLifecyclePolicyRequest, @@ -3205,7 +3336,7 @@ impl Ecr for EcrClient { }) } - ///

Applies a repository policy on a specified repository to control access permissions.

+ ///

Applies a repository policy on a specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

fn set_repository_policy( &self, input: SetRepositoryPolicyRequest, diff --git a/rusoto/services/ecr/src/lib.rs b/rusoto/services/ecr/src/lib.rs index 1baa547397b..98c5d8c83b9 100644 --- a/rusoto/services/ecr/src/lib.rs +++ b/rusoto/services/ecr/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry service. Customers can use the familiar Docker CLI to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR supports private Docker repositories with resource-based permissions using IAM so that specific users or Amazon EC2 instances can access repositories and images. Developers can use the Docker CLI to author and manage images.

+//!

Amazon Elastic Container Registry

Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry service. Customers can use the familiar Docker CLI to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR supports private Docker repositories with resource-based permissions using IAM so that specific users or Amazon EC2 instances can access repositories and images. Developers can use the Docker CLI to author and manage images.

//! //! If you're using the service, you're probably looking for [EcrClient](struct.EcrClient.html) and [Ecr](trait.Ecr.html). diff --git a/rusoto/services/ecs/Cargo.toml b/rusoto/services/ecs/Cargo.toml index a37212f731f..76b459fe67c 100644 --- a/rusoto/services/ecs/Cargo.toml +++ b/rusoto/services/ecs/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ecs" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ecs/README.md b/rusoto/services/ecs/README.md index 6dec2624454..367e01fac26 100644 --- a/rusoto/services/ecs/README.md +++ b/rusoto/services/ecs/README.md @@ -23,9 +23,16 @@ To use `rusoto_ecs` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_ecs = "0.40.0" +rusoto_ecs = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ecs/src/custom/mod.rs b/rusoto/services/ecs/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ecs/src/custom/mod.rs +++ b/rusoto/services/ecs/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ecs/src/generated.rs b/rusoto/services/ecs/src/generated.rs index eac6b11d05b..4795ac7628a 100644 --- a/rusoto/services/ecs/src/generated.rs +++ b/rusoto/services/ecs/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

An object representing a container instance or task attachment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Attachment { ///

Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

#[serde(rename = "details")] @@ -95,7 +94,7 @@ pub struct AwsVpcConfiguration { ///

A regional grouping of one or more container instances on which you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Cluster { ///

The number of services that are running on the cluster in an ACTIVE state. You can view these services with ListServices.

#[serde(rename = "activeServicesCount")] @@ -121,6 +120,10 @@ pub struct Cluster { #[serde(rename = "runningTasksCount")] #[serde(skip_serializing_if = "Option::is_none")] pub running_tasks_count: Option, + ///

The settings for the cluster. This parameter indicates whether CloudWatch Container Insights is enabled or disabled for a cluster.

+ #[serde(rename = "settings")] + #[serde(skip_serializing_if = "Option::is_none")] + pub settings: Option>, ///

Additional information about your clusters that are separated by launch type, including:

  • runningEC2TasksCount

  • runningFargateTasksCount

  • pendingEC2TasksCount

  • pendingFargateTasksCount

  • activeEC2ServiceCount

  • activeFargateServiceCount

  • drainingEC2ServiceCount

  • drainingFargateServiceCount

#[serde(rename = "statistics")] #[serde(skip_serializing_if = "Option::is_none")] @@ -129,15 +132,28 @@ pub struct Cluster { #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - ///

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the cluster to help you categorize and organize your clusters. Each tag consists of a key and an optional value, both of which you define. A short sketch of building such tags follows the restrictions list below.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values, as they are reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

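As a small illustration (not from this diff), tags respecting these limits could be built as follows, assuming the generated `rusoto_ecs::Tag` struct with optional key and value fields:

```rust
use rusoto_ecs::Tag;

/// Hypothetical helper producing tags that respect the limits above.
fn example_tags() -> Vec<Tag> {
    vec![
        Tag {
            key: Some("team".to_string()),       // at most 128 UTF-8 characters
            value: Some("platform".to_string()), // at most 256 UTF-8 characters
        },
        Tag {
            key: Some("environment".to_string()),
            value: Some("staging".to_string()),
        },
    ]
}
```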
#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } +///

The settings to use when creating a cluster. This parameter is used to enable CloudWatch Container Insights for a cluster.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClusterSetting { + ///

The name of the cluster setting. The only supported value is containerInsights.

+ #[serde(rename = "name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The value to set for the cluster setting. The supported values are enabled and disabled. If enabled is specified, CloudWatch Container Insights will be enabled for the cluster; otherwise it will be disabled unless the containerInsights account setting is enabled. If a cluster value is specified, it overrides the containerInsights value set with PutAccountSetting or PutAccountSettingDefault.

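A sketch of supplying this setting at cluster creation, assuming `CreateClusterRequest` in this crate exposes a `settings` field of `Vec<ClusterSetting>`; the cluster name is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_ecs::{ClusterSetting, CreateClusterRequest, Ecs, EcsClient};

fn main() {
    let client = EcsClient::new(Region::UsEast1);
    let request = CreateClusterRequest {
        cluster_name: Some("insights-demo".to_string()), // placeholder name
        // Enable CloudWatch Container Insights for this cluster only.
        settings: Some(vec![ClusterSetting {
            name: Some("containerInsights".to_string()),
            value: Some("enabled".to_string()),
        }]),
        ..Default::default()
    };
    match client.create_cluster(request).sync() {
        Ok(response) => println!("{:?}", response.cluster),
        Err(e) => eprintln!("CreateCluster failed: {}", e),
    }
}
```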
+ #[serde(rename = "value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + ///

A Docker container that is part of a task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Container { ///

The Amazon Resource Name (ARN) of the container.

#[serde(rename = "containerArn")] @@ -151,6 +167,10 @@ pub struct Container { #[serde(rename = "exitCode")] #[serde(skip_serializing_if = "Option::is_none")] pub exit_code: Option, + ///

The FireLens configuration for the container.

+ #[serde(rename = "firelensConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub firelens_configuration: Option, ///

The IDs of each GPU assigned to the container.

#[serde(rename = "gpuIds")] #[serde(skip_serializing_if = "Option::is_none")] @@ -187,6 +207,10 @@ pub struct Container { #[serde(rename = "reason")] #[serde(skip_serializing_if = "Option::is_none")] pub reason: Option, + ///

The ID of the Docker container.

+ #[serde(rename = "runtimeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub runtime_id: Option, ///

The ARN of the task.

#[serde(rename = "taskArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -244,6 +268,10 @@ pub struct ContainerDefinition { #[serde(rename = "extraHosts")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_hosts: Option>, + ///

The FireLens configuration for the container. This is used to specify and configure a log router for container logs.

+ #[serde(rename = "firelensConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub firelens_configuration: Option, ///

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

#[serde(rename = "healthCheck")] #[serde(skip_serializing_if = "Option::is_none")] @@ -268,15 +296,15 @@ pub struct ContainerDefinition { #[serde(rename = "linuxParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub linux_parameters: Option, - ///

The log configuration specification for the container.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, syslog, gelf, fluentd, splunk, journald, and json-file.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

+ ///

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

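A sketch of wiring the awslogs driver into a container definition, assuming the generated `LogConfiguration` struct with a required `log_driver` and an optional `options` map; the log group, region, and image values are placeholders:

```rust
use std::collections::HashMap;

use rusoto_ecs::{ContainerDefinition, LogConfiguration};

// Hypothetical container definition routing logs to CloudWatch Logs.
fn container_with_awslogs() -> ContainerDefinition {
    let mut options = HashMap::new();
    options.insert("awslogs-group".to_string(), "/ecs/my-app".to_string());
    options.insert("awslogs-region".to_string(), "us-east-1".to_string());
    options.insert("awslogs-stream-prefix".to_string(), "app".to_string());
    ContainerDefinition {
        name: Some("app".to_string()),
        image: Some("nginx:latest".to_string()),
        log_configuration: Some(LogConfiguration {
            log_driver: "awslogs".to_string(),
            options: Some(options),
            ..Default::default()
        }),
        ..Default::default()
    }
}
```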
#[serde(rename = "logConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub log_configuration: Option, - ///

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

+ ///

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

#[serde(rename = "memory")] #[serde(skip_serializing_if = "Option::is_none")] pub memory: Option, - ///

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

+ ///

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.
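For a concrete picture of the soft/hard limit interaction above, a minimal sketch, assuming ContainerDefinition derives Default like the other generated structs and keeps its usual optional name and image fields:

```rust
// 128 MiB soft reservation with a 300 MiB hard limit, as in the example above.
let container = ContainerDefinition {
    name: Some("web".to_string()),
    image: Some("nginx:latest".to_string()),
    memory: Some(300),             // hard limit (MiB): exceeding it kills the container
    memory_reservation: Some(128), // soft limit (MiB) reserved on the instance
    ..Default::default()
};
```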

#[serde(rename = "memoryReservation")] #[serde(skip_serializing_if = "Option::is_none")] pub memory_reservation: Option, @@ -316,11 +344,11 @@ pub struct ContainerDefinition { #[serde(rename = "secrets")] #[serde(skip_serializing_if = "Option::is_none")] pub secrets: Option>, - ///

Time duration to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

+ ///

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

#[serde(rename = "startTimeout")] #[serde(skip_serializing_if = "Option::is_none")] pub start_timeout: Option, - ///

Time duration to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

+ ///

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
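A minimal sketch of both timeouts together, assuming ContainerDefinition derives Default; both fields take seconds:

```rust
// Wait up to 2 minutes for dependencies, allow 30 seconds to exit cleanly.
let sidecar = ContainerDefinition {
    name: Some("envoy".to_string()),
    start_timeout: Some(120), // give up on dependency resolution after 120 s
    stop_timeout: Some(30),   // force-kill 30 s after a stop is issued
    ..Default::default()
};
```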

#[serde(rename = "stopTimeout")] #[serde(skip_serializing_if = "Option::is_none")] pub stop_timeout: Option, @@ -359,7 +387,7 @@ pub struct ContainerDependency { ///

An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ContainerInstance {
    ///

This parameter returns true if the agent is connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false. Only instances connected to an agent can accept placement requests.

#[serde(rename = "agentConnected")] @@ -413,7 +441,7 @@ pub struct ContainerInstance { #[serde(rename = "statusReason")] #[serde(skip_serializing_if = "Option::is_none")] pub status_reason: Option, - ///

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -479,6 +507,10 @@ pub struct ContainerStateChange { #[serde(rename = "reason")] #[serde(skip_serializing_if = "Option::is_none")] pub reason: Option, + ///

The ID of the Docker container.

+ #[serde(rename = "runtimeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub runtime_id: Option, ///

The status of the container.

#[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] @@ -491,14 +523,18 @@ pub struct CreateClusterRequest { #[serde(rename = "clusterName")] #[serde(skip_serializing_if = "Option::is_none")] pub cluster_name: Option, - ///

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The setting to use when creating a cluster. This parameter is used to enable CloudWatch Container Insights for a cluster. If this value is specified, it will override the containerInsights value set with PutAccountSetting or PutAccountSettingDefault.

+ #[serde(rename = "settings")] + #[serde(skip_serializing_if = "Option::is_none")] + pub settings: Option>, + ///

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
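A minimal sketch tying the settings and tags fields together, assuming ClusterSetting and Tag carry the optional name/value and key/value fields of the ECS API:

```rust
let request = CreateClusterRequest {
    cluster_name: Some("demo-cluster".to_string()),
    // Overrides the account-level containerInsights default for this cluster.
    settings: Some(vec![ClusterSetting {
        name: Some("containerInsights".to_string()),
        value: Some("enabled".to_string()),
    }]),
    tags: Some(vec![Tag {
        key: Some("team".to_string()),
        value: Some("platform".to_string()),
    }]),
};
```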

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResponse { ///

The full description of your new cluster.

#[serde(rename = "cluster")] @@ -524,7 +560,7 @@ pub struct CreateServiceRequest { #[serde(rename = "deploymentController")] #[serde(skip_serializing_if = "Option::is_none")] pub deployment_controller: Option, - ///

The number of instantiations of the specified task definition to place and keep running on your cluster.

+ ///

The number of instantiations of the specified task definition to place and keep running on your cluster.

This is required if schedulingStrategy is REPLICA or is not specified. If schedulingStrategy is DAEMON then this is not required.
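A minimal sketch of a REPLICA service, assuming CreateServiceRequest derives Default and that service_name and task_definition have the shapes rusoto normally generates:

```rust
// desiredCount is required because the default scheduling strategy is REPLICA.
let request = CreateServiceRequest {
    service_name: "web".to_string(),
    task_definition: Some("web:1".to_string()),
    desired_count: Some(2),
    ..Default::default()
};
```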

#[serde(rename = "desiredCount")] #[serde(skip_serializing_if = "Option::is_none")] pub desired_count: Option, @@ -540,7 +576,7 @@ pub struct CreateServiceRequest { #[serde(rename = "launchType")] #[serde(skip_serializing_if = "Option::is_none")] pub launch_type: Option, - ///

A load balancer object representing the load balancer to use with your service.

If the service is using the ECS deployment controller, you are limited to one load balancer or target group.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

+ ///

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the service.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

#[serde(rename = "loadBalancers")] #[serde(skip_serializing_if = "Option::is_none")] pub load_balancers: Option>, @@ -579,7 +615,7 @@ pub struct CreateServiceRequest { #[serde(rename = "serviceRegistries")] #[serde(skip_serializing_if = "Option::is_none")] pub service_registries: Option>, - ///

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -590,7 +626,7 @@ pub struct CreateServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateServiceResponse { ///

The full description of your service following the create call.

If a service is using the ECS deployment controller, the deploymentController and taskSets parameters will not be returned.

If the service is using the CODE_DEPLOY deployment controller, the deploymentController, taskSets and deployments parameters will be returned, however the deployments parameter will be an empty list.

#[serde(rename = "service")] @@ -642,7 +678,7 @@ pub struct CreateTaskSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTaskSetResponse { #[serde(rename = "taskSet")] #[serde(skip_serializing_if = "Option::is_none")] @@ -661,7 +697,7 @@ pub struct DeleteAccountSettingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAccountSettingResponse { ///

The account setting for the specified principal ARN.

#[serde(rename = "setting")] @@ -681,7 +717,7 @@ pub struct DeleteAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAttributesResponse { ///

A list of attribute objects that were successfully deleted from your resource.

#[serde(rename = "attributes")] @@ -697,7 +733,7 @@ pub struct DeleteClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClusterResponse { ///

The full description of the deleted cluster.

#[serde(rename = "cluster")] @@ -721,7 +757,7 @@ pub struct DeleteServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteServiceResponse { ///

The full description of the deleted service.

#[serde(rename = "service")] @@ -747,7 +783,7 @@ pub struct DeleteTaskSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTaskSetResponse { #[serde(rename = "taskSet")] #[serde(skip_serializing_if = "Option::is_none")] @@ -756,7 +792,7 @@ pub struct DeleteTaskSetResponse { ///

The details of an Amazon ECS service deployment. This is used only when a service uses the ECS deployment controller type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Deployment {
    ///

The Unix timestamp for when the service deployment was created.

#[serde(rename = "createdAt")] @@ -841,7 +877,7 @@ pub struct DeregisterContainerInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterContainerInstanceResponse { ///

The container instance that was deregistered.

#[serde(rename = "containerInstance")] @@ -857,7 +893,7 @@ pub struct DeregisterTaskDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterTaskDefinitionResponse { ///

The full description of the deregistered task.

#[serde(rename = "taskDefinition")] @@ -878,7 +914,7 @@ pub struct DescribeClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClustersResponse { ///

The list of clusters.

#[serde(rename = "clusters")] @@ -906,7 +942,7 @@ pub struct DescribeContainerInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeContainerInstancesResponse { ///

The list of container instances.

#[serde(rename = "containerInstances")] @@ -934,7 +970,7 @@ pub struct DescribeServicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeServicesResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -958,9 +994,9 @@ pub struct DescribeTaskDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTaskDefinitionResponse { - ///

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -985,7 +1021,7 @@ pub struct DescribeTaskSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTaskSetsResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -1013,7 +1049,7 @@ pub struct DescribeTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTasksResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -1054,7 +1090,7 @@ pub struct DiscoverPollEndpointRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiscoverPollEndpointResponse { ///

The endpoint for the Amazon ECS agent to poll.

#[serde(rename = "endpoint")] @@ -1093,7 +1129,7 @@ pub struct DockerVolumeConfiguration { ///

A failed resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Failure {
    ///

The Amazon Resource Name (ARN) of the failed resource.

#[serde(rename = "arn")] @@ -1105,6 +1141,18 @@ pub struct Failure { pub reason: Option, } +///

The FireLens configuration for the container. This is used to specify and configure a log router for container logs.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct FirelensConfiguration {
+    ///

The options to use when configuring the log router. This field is optional and can be used to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is "options":{"enable-ecs-log-metadata":"true|false"}.

+ #[serde(rename = "options")] + #[serde(skip_serializing_if = "Option::is_none")] + pub options: Option<::std::collections::HashMap>, + ///

The log router to use. The valid values are fluentd or fluentbit.
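A minimal sketch using the two fields of this struct as defined in this diff:

```rust
// Route container logs through Fluent Bit and enrich events with task metadata.
let firelens = FirelensConfiguration {
    type_: "fluentbit".to_string(),
    options: Some(
        vec![("enable-ecs-log-metadata".to_string(), "true".to_string())]
            .into_iter()
            .collect(),
    ),
};
```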

+ #[serde(rename = "type")] + pub type_: String, +} + ///

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if you are using platform version 1.1.0 or greater. For more information, see AWS Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.
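A minimal sketch of a curl-based check; the field names (command, interval, retries, and so on) are not shown in this hunk and are assumed from the ECS API shape, as is the Default derive:

```rust
let health_check = HealthCheck {
    command: vec![
        "CMD-SHELL".to_string(),
        "curl -f http://localhost/ || exit 1".to_string(),
    ],
    interval: Some(30), // seconds between checks
    retries: Some(3),   // consecutive failures before the container is unhealthy
    ..Default::default()
};
```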

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HealthCheck {
@@ -1149,6 +1197,30 @@ pub struct HostVolumeProperties {
    pub source_path: Option<String>,
}

+///

Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct InferenceAccelerator {
+    ///

The Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement.

+ #[serde(rename = "deviceName")] + pub device_name: String, + ///

The Elastic Inference accelerator type to use.

+ #[serde(rename = "deviceType")] + pub device_type: String, +} + +///

Details on an Elastic Inference accelerator task override. This parameter is used to override the Elastic Inference accelerator specified in the task definition. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct InferenceAcceleratorOverride {
+    ///

The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.

+ #[serde(rename = "deviceName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub device_name: Option, + ///

The Elastic Inference accelerator type to use.

+ #[serde(rename = "deviceType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub device_type: Option, +} + ///

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information on these Linux capabilities, see the capabilities(7) Linux manual page.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KernelCapabilities {
@@ -1190,10 +1262,18 @@ pub struct LinuxParameters {
    #[serde(rename = "initProcessEnabled")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub init_process_enabled: Option<bool>,
+    ///

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

+ #[serde(rename = "maxSwap")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_swap: Option, ///

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

#[serde(rename = "sharedMemorySize")] #[serde(skip_serializing_if = "Option::is_none")] pub shared_memory_size: Option, + ///

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.
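A minimal sketch combining the two new swap fields, assuming LinuxParameters derives Default like the other generated structs:

```rust
// Allow up to 512 MiB of swap on top of the memory limit, with the kernel's
// default swappiness. Not valid for Fargate tasks, as noted above.
let linux_parameters = LinuxParameters {
    max_swap: Some(512),
    swappiness: Some(60),
    ..Default::default()
};
```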

+ #[serde(rename = "swappiness")] + #[serde(skip_serializing_if = "Option::is_none")] + pub swappiness: Option, ///

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

#[serde(rename = "tmpfs")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1229,7 +1309,7 @@ pub struct ListAccountSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAccountSettingsResponse { ///

The nextToken value to include in a future ListAccountSettings request. When the results of a ListAccountSettings request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -1269,7 +1349,7 @@ pub struct ListAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAttributesResponse { ///

A list of attribute objects that meet the criteria of the request.

#[serde(rename = "attributes")] @@ -1294,7 +1374,7 @@ pub struct ListClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClustersResponse { ///

The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.

#[serde(rename = "clusterArns")] @@ -1331,7 +1411,7 @@ pub struct ListContainerInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListContainerInstancesResponse { ///

The list of container instances with full ARN entries for each container instance associated with the specified cluster.

#[serde(rename = "containerInstanceArns")] @@ -1368,7 +1448,7 @@ pub struct ListServicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListServicesResponse { ///

The nextToken value to include in a future ListServices request. When the results of a ListServices request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -1388,7 +1468,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The tags for the resource.

#[serde(rename = "tags")] @@ -1417,7 +1497,7 @@ pub struct ListTaskDefinitionFamiliesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTaskDefinitionFamiliesResponse { ///

The list of task definition family names that match the ListTaskDefinitionFamilies request.

#[serde(rename = "families")] @@ -1454,7 +1534,7 @@ pub struct ListTaskDefinitionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTaskDefinitionsResponse { ///

The nextToken value to include in a future ListTaskDefinitions request. When the results of a ListTaskDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -1507,7 +1587,7 @@ pub struct ListTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTasksResponse { ///

The nextToken value to include in a future ListTasks request. When the results of a ListTasks request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -1519,22 +1599,22 @@ pub struct ListTasksResponse { pub task_arns: Option>, } -///

Details on a load balancer to be used with a service or task set.

If the service is using the ECS deployment controller, you are limited to one load balancer or target group.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When you are creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). Each target group binds to a separate task set in the deployment. The load balancer can also have up to two listeners, a required listener for production traffic and an optional listener that allows you to test new revisions of the service before routing production traffic to it.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance. Tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

+///

Details on the load balancer or load balancers to use with a service or task set.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancer {
    ///

The name of the container (as it appears in a container definition) to associate with the load balancer.

#[serde(rename = "containerName")] #[serde(skip_serializing_if = "Option::is_none")] pub container_name: Option, - ///

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

+ ///

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort of the port mapping.

#[serde(rename = "containerPort")] #[serde(skip_serializing_if = "Option::is_none")] pub container_port: Option, - ///

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a classic load balancer. If you are using an application load balancer or a network load balancer this should be omitted.

+ ///

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer this should be omitted.

#[serde(rename = "loadBalancerName")] #[serde(skip_serializing_if = "Option::is_none")] pub load_balancer_name: Option, - ///

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an application load balancer or a network load balancer. If you are using a classic load balancer this should be omitted.

For services using the ECS deployment controller, you are limited to one target group. For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

+ ///

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer this should be omitted.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.
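A minimal sketch of an ALB/NLB attachment using the four fields of this struct; the target group ARN is a placeholder:

```rust
let load_balancer = LoadBalancer {
    target_group_arn: Some(
        "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/web/abc123"
            .to_string(),
    ),
    container_name: Some("web".to_string()),
    container_port: Some(8080),
    load_balancer_name: None, // only used with Classic Load Balancers
};
```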

#[serde(rename = "targetGroupArn")] #[serde(skip_serializing_if = "Option::is_none")] pub target_group_arn: Option, @@ -1543,7 +1623,7 @@ pub struct LoadBalancer { ///

Log configuration options to send to a custom log driver for the container.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LogConfiguration {
-    ///

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, syslog, gelf, fluentd, splunk, journald, and json-file.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

+ ///

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
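A minimal sketch of the awslogs driver, which is valid on both launch types; the group, region, and prefix values are placeholders:

```rust
use std::collections::HashMap;

let mut options = HashMap::new();
options.insert("awslogs-group".to_string(), "/ecs/web".to_string());
options.insert("awslogs-region".to_string(), "us-east-1".to_string());
options.insert("awslogs-stream-prefix".to_string(), "web".to_string());

let log_configuration = LogConfiguration {
    log_driver: "awslogs".to_string(),
    options: Some(options),
    ..Default::default()
};
```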

#[serde(rename = "logDriver")] pub log_driver: String, ///

The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

@@ -1605,7 +1685,7 @@ pub struct NetworkConfiguration {
    ///

An object representing the elastic network interface for tasks that use the awsvpc network mode.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct NetworkInterface {
    ///

The attachment ID for the network interface.

#[serde(rename = "attachmentId")] @@ -1621,14 +1701,14 @@ pub struct NetworkInterface { pub private_ipv_4_address: Option, } -///

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

+///

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

If you are using the Fargate launch type, task placement constraints are not supported.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct PlacementConstraint {
    ///

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

#[serde(rename = "expression")] #[serde(skip_serializing_if = "Option::is_none")] pub expression: Option, - ///

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. The value distinctInstance is not supported in task definitions.

+ ///

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates.
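A minimal sketch of a memberOf constraint using the cluster query language example from the ECS documentation:

```rust
// Restrict placement to t2 instances.
let constraint = PlacementConstraint {
    type_: Some("memberOf".to_string()),
    expression: Some("attribute:ecs.instance-type =~ t2.*".to_string()),
};
```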

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -1693,7 +1773,7 @@ pub struct ProxyConfiguration { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutAccountSettingDefaultRequest { - ///

The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected.

+ ///

The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for CloudWatch Container Insights for your clusters is affected.

#[serde(rename = "name")] pub name: String, ///

The account setting value for the specified principal ARN. Accepted values are enabled and disabled.

@@ -1702,7 +1782,7 @@ pub struct PutAccountSettingDefaultRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PutAccountSettingDefaultResponse {
    #[serde(rename = "setting")]
    #[serde(skip_serializing_if = "Option::is_none")]
@@ -1711,7 +1791,7 @@ pub struct PutAccountSettingDefaultResponse {

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct PutAccountSettingRequest {
-    ///

The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected.

+ ///

The Amazon ECS resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for CloudWatch Container Insights for your clusters is affected.

#[serde(rename = "name")] pub name: String, ///

The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you specify the root user, it modifies the account setting for all IAM users, IAM roles, and the root user of the account unless an IAM user or role explicitly overrides these settings. If this field is omitted, the setting is changed only for the authenticated user.
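A minimal sketch of opting the authenticated user in to Container Insights; the value field is assumed to have the same String shape shown for the sibling PutAccountSettingDefaultRequest:

```rust
let request = PutAccountSettingRequest {
    name: "containerInsights".to_string(),
    value: "enabled".to_string(),
    principal_arn: None, // omitted: change only the authenticated user
};
```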

@@ -1724,7 +1804,7 @@ pub struct PutAccountSettingRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PutAccountSettingResponse {
    ///

The current account setting for a resource.

#[serde(rename = "setting")] @@ -1744,7 +1824,7 @@ pub struct PutAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutAttributesResponse { ///

The attributes applied to your resource.

#[serde(rename = "attributes")] @@ -1778,7 +1858,7 @@ pub struct RegisterContainerInstanceRequest { #[serde(rename = "platformDevices")] #[serde(skip_serializing_if = "Option::is_none")] pub platform_devices: Option>, - ///

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1793,7 +1873,7 @@ pub struct RegisterContainerInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterContainerInstanceResponse { ///

The container instance that was registered.

#[serde(rename = "containerInstance")] @@ -1817,6 +1897,10 @@ pub struct RegisterTaskDefinitionRequest { ///

You must specify a family for a task definition, which allows you to track multiple versions of the same task definition. The family is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed.

#[serde(rename = "family")] pub family: String, + ///

The Elastic Inference accelerators to use for the containers in the task.

+ #[serde(rename = "inferenceAccelerators")] + #[serde(skip_serializing_if = "Option::is_none")] + pub inference_accelerators: Option>, ///

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

#[serde(rename = "ipcMode")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1844,7 +1928,7 @@ pub struct RegisterTaskDefinitionRequest { #[serde(rename = "requiresCompatibilities")] #[serde(skip_serializing_if = "Option::is_none")] pub requires_compatibilities: Option>, - ///

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1859,7 +1943,7 @@ pub struct RegisterTaskDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterTaskDefinitionResponse { ///

The list of tags associated with the task definition.

#[serde(rename = "tags")] @@ -1908,13 +1992,13 @@ pub struct Resource { pub type_: Option, } -///

The type and amount of a resource to assign to a container. The only supported resource is a GPU. For more information, see Working with GPUs on Amazon ECS in the Amazon Elastic Container Service Developer Guide

+///

The type and amount of a resource to assign to a container. The supported resource types are GPUs and Elastic Inference accelerators. For more information, see Working with GPUs on Amazon ECS or Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide

 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ResourceRequirement {
-    ///

The type of resource to assign to a container. The only supported value is GPU.

+ ///

The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator.

#[serde(rename = "type")] pub type_: String, - ///

The number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

+ ///

The value for the specified resource type.

If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

If the InferenceAccelerator type is used, the value should match the deviceName for an InferenceAccelerator specified in a task definition.
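A short illustration of the two shapes this struct can take, per the descriptions above (a sketch only; `device_1` stands in for a hypothetical `deviceName` declared in a task definition):

```rust
use rusoto_ecs::ResourceRequirement;

// Reserve one physical GPU for the container.
let gpu = ResourceRequirement {
    type_: "GPU".to_owned(),
    value: "1".to_owned(),
};

// Reference an Elastic Inference accelerator from the task definition;
// the value must match that accelerator's deviceName.
let accelerator = ResourceRequirement {
    type_: "InferenceAccelerator".to_owned(),
    value: "device_1".to_owned(), // hypothetical deviceName
};
```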

#[serde(rename = "value")] pub value: String, } @@ -1969,7 +2053,7 @@ pub struct RunTaskRequest { #[serde(rename = "startedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub started_by: Option, - ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1979,7 +2063,7 @@ pub struct RunTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RunTaskResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -2017,7 +2101,7 @@ pub struct Secret { ///

Details on a service within a cluster

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Service { ///

The Amazon Resource Name (ARN) of the cluster that hosts the service.

#[serde(rename = "clusterArn")] @@ -2035,7 +2119,7 @@ pub struct Service { #[serde(rename = "deploymentConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub deployment_configuration: Option, - ///

The deployment controller type the service is using.

+ ///

The deployment controller type the service is using. When using the DescribeServices API, this field is omitted if the service is using the ECS deployment controller type.

#[serde(rename = "deploymentController")] #[serde(skip_serializing_if = "Option::is_none")] pub deployment_controller: Option, @@ -2063,7 +2147,7 @@ pub struct Service { #[serde(rename = "launchType")] #[serde(skip_serializing_if = "Option::is_none")] pub launch_type: Option, - ///

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance. Tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

+ ///

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

#[serde(rename = "loadBalancers")] #[serde(skip_serializing_if = "Option::is_none")] pub load_balancers: Option>, @@ -2119,7 +2203,7 @@ pub struct Service { #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - ///

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -2135,7 +2219,7 @@ pub struct Service { ///

Details on an event associated with a service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceEvent { ///

The Unix timestamp for when the event was triggered.

#[serde(rename = "createdAt")] @@ -2174,9 +2258,9 @@ pub struct ServiceRegistry { ///

The current account setting for a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Setting { - ///

The account resource name.

+ ///

The Amazon ECS resource name.

#[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, @@ -2184,7 +2268,7 @@ pub struct Setting { #[serde(rename = "principalArn")] #[serde(skip_serializing_if = "Option::is_none")] pub principal_arn: Option, - ///

The current account setting for the resource name. If enabled, the resource receives the new Amazon Resource Name (ARN) and resource identifier (ID) format. If disabled, the resource receives the old Amazon Resource Name (ARN) and resource identifier (ID) format.

+ ///

Whether the account setting is enabled or disabled for the specified resource.

#[serde(rename = "value")] #[serde(skip_serializing_if = "Option::is_none")] pub value: Option, @@ -2223,7 +2307,7 @@ pub struct StartTaskRequest { #[serde(rename = "startedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub started_by: Option, - ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -2233,7 +2317,7 @@ pub struct StartTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartTaskResponse { ///

Any failures associated with the call.

#[serde(rename = "failures")] @@ -2261,7 +2345,7 @@ pub struct StopTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopTaskResponse { ///

The task that was stopped.

#[serde(rename = "task")] @@ -2281,7 +2365,7 @@ pub struct SubmitAttachmentStateChangesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubmitAttachmentStateChangesResponse { ///

Acknowledgement of the state change.

#[serde(rename = "acknowledgment")] @@ -2311,6 +2395,10 @@ pub struct SubmitContainerStateChangeRequest { #[serde(rename = "reason")] #[serde(skip_serializing_if = "Option::is_none")] pub reason: Option, + ///

The ID of the Docker container.

+ #[serde(rename = "runtimeId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub runtime_id: Option, ///

The status of the state change request.

#[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2322,7 +2410,7 @@ pub struct SubmitContainerStateChangeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubmitContainerStateChangeResponse { ///

Acknowledgement of the state change.

#[serde(rename = "acknowledgment")] @@ -2371,7 +2459,7 @@ pub struct SubmitTaskStateChangeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubmitTaskStateChangeResponse { ///

Acknowledgement of the state change.

#[serde(rename = "acknowledgment")] @@ -2392,7 +2480,7 @@ pub struct SystemControl { pub value: Option, } -///

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+///

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Tag {
     ///

One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.
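Building a tag list for the tagging APIs below is therefore just struct literals. A minimal sketch, assuming the generated `Tag` fields are `Option<String>` as elsewhere in this module (the key/value strings are examples only):

```rust
use rusoto_ecs::Tag;

// Assumes Tag's fields are Option<String>, matching the generated code.
let tags = vec![
    Tag {
        key: Some("environment".to_owned()),
        value: Some("staging".to_owned()),
    },
    Tag {
        key: Some("team".to_owned()),
        value: Some("platform".to_owned()),
    },
];
```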

@@ -2410,18 +2498,18 @@ pub struct TagResourceRequest {
     ///

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.

#[serde(rename = "resourceArn")] pub resource_arn: String, - ///

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The tags to add to the resource. A tag is an array of key-value pairs.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] pub tags: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Details on a task in a cluster.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Task {
     ///

The Elastic Network Adapter associated with the task if the task uses the awsvpc network mode.

#[serde(rename = "attachments")] @@ -2471,6 +2559,10 @@ pub struct Task { #[serde(rename = "healthStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub health_status: Option, + ///

The Elastic Inference accelerator associated with the task.

+ #[serde(rename = "inferenceAccelerators")] + #[serde(skip_serializing_if = "Option::is_none")] + pub inference_accelerators: Option>, ///

The last known status of the task. For more information, see Task Lifecycle.

#[serde(rename = "lastStatus")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2523,7 +2615,7 @@ pub struct Task { #[serde(rename = "stoppingAt")] #[serde(skip_serializing_if = "Option::is_none")] pub stopping_at: Option, - ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

+ ///

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -2543,7 +2635,7 @@ pub struct Task { ///

The details of a task definition which describes the container and volume definitions of an Amazon Elastic Container Service task. You can specify which Docker images to use, the required resources, and other configurations related to launching the task definition through an Amazon ECS service or task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskDefinition { ///

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

#[serde(rename = "compatibilities")] @@ -2569,7 +2661,7 @@ pub struct TaskDefinition { #[serde(rename = "ipcMode")] #[serde(skip_serializing_if = "Option::is_none")] pub ipc_mode: Option, - ///

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

+ ///

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)
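Reading one row of the list above into registration parameters gives, for example, the pairing below (a sketch only; the family name is a placeholder, and it assumes `RegisterTaskDefinitionRequest` derives `Default` like the neighboring request structs):

```rust
use rusoto_ecs::RegisterTaskDefinitionRequest;

// One valid Fargate combination from the list above:
// cpu 1024 (1 vCPU) permits memory values such as 2048 MiB.
let request = RegisterTaskDefinitionRequest {
    family: "my-fargate-task".to_owned(), // placeholder family name
    cpu: Some("1024".to_owned()),
    memory: Some("2048".to_owned()),
    requires_compatibilities: Some(vec!["FARGATE".to_owned()]),
    network_mode: Some("awsvpc".to_owned()),
    ..Default::default()
};
```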

#[serde(rename = "memory")] #[serde(skip_serializing_if = "Option::is_none")] pub memory: Option, @@ -2609,7 +2701,7 @@ pub struct TaskDefinition { #[serde(rename = "taskDefinitionArn")] #[serde(skip_serializing_if = "Option::is_none")] pub task_definition_arn: Option, - ///

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

+ ///

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

#[serde(rename = "taskRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub task_role_arn: Option, @@ -2619,14 +2711,14 @@ pub struct TaskDefinition { pub volumes: Option>, } -///

An object representing a constraint on task placement in the task definition.

If you are using the Fargate launch type, task placement constraints are not supported.

For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

+///

An object representing a constraint on task placement in the task definition. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

If you are using the Fargate launch type, task placement constraints are not supported.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TaskDefinitionPlacementConstraint { ///

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

#[serde(rename = "expression")] #[serde(skip_serializing_if = "Option::is_none")] pub expression: Option, - ///

The type of constraint. The DistinctInstance constraint ensures that each task in a particular group is running on a different container instance. The MemberOf constraint restricts selection to be from a group of valid candidates.

+ ///

The type of constraint. The MemberOf constraint restricts selection to be from a group of valid candidates.

#[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -2643,6 +2735,10 @@ pub struct TaskOverride { #[serde(rename = "executionRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_role_arn: Option, + ///

The Elastic Inference accelerator override for the task.

+ #[serde(rename = "inferenceAcceleratorOverrides")] + #[serde(skip_serializing_if = "Option::is_none")] + pub inference_accelerator_overrides: Option>, ///

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

#[serde(rename = "taskRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2651,7 +2747,7 @@ pub struct TaskOverride { ///

Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskSet { ///

The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists in.

#[serde(rename = "clusterArn")] @@ -2709,7 +2805,7 @@ pub struct TaskSet { #[serde(rename = "serviceRegistries")] #[serde(skip_serializing_if = "Option::is_none")] pub service_registries: Option>, - ///

The stability status, which indicates whether the task set has reached a steady state. If the following conditions are met, the task set will be in STEADY_STATE:

  • The task runningCount is equal to the computedDesiredCount.

  • The pendingCount is 0.

  • There are no tasks running on container instances in the DRAINING status.

  • All tasks are reporting a healthy status from the load balancers, service discovery, and container health checks.

    If a healthCheckGracePeriodSeconds value was set when the service was created, you may see a STEADY_STATE reached since unhealthy Elastic Load Balancing target health checks will be ignored until it expires.

If any of those conditions are not met, the stability status returns STABILIZING.

+ ///

The stability status, which indicates whether the task set has reached a steady state. If the following conditions are met, the task set will be in STEADY_STATE:

  • The task runningCount is equal to the computedDesiredCount.

  • The pendingCount is 0.

  • There are no tasks running on container instances in the DRAINING status.

  • All tasks are reporting a healthy status from the load balancers, service discovery, and container health checks.

If any of those conditions are not met, the stability status returns STABILIZING.

#[serde(rename = "stabilityStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub stability_status: Option, @@ -2779,9 +2875,27 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateClusterSettingsRequest { + ///

The name of the cluster to modify the settings for.

+ #[serde(rename = "cluster")] + pub cluster: String, + ///

The setting to use by default for a cluster. This parameter is used to enable CloudWatch Container Insights for a cluster. If this value is specified, it will override the containerInsights value set with PutAccountSetting or PutAccountSettingDefault.
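These two fields are all the new operation needs. A sketch of enabling Container Insights on a cluster and matching the error type introduced further down (it assumes `ClusterSetting` has optional `name`/`value` string fields, mirroring `Setting`):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_ecs::{
    ClusterSetting, Ecs, EcsClient, UpdateClusterSettingsError, UpdateClusterSettingsRequest,
};

fn main() {
    let client = EcsClient::new(Region::UsEast1);

    let request = UpdateClusterSettingsRequest {
        cluster: "default".to_owned(),
        settings: vec![ClusterSetting {
            // Overrides any containerInsights account-level setting.
            name: Some("containerInsights".to_owned()),
            value: Some("enabled".to_owned()),
        }],
    };

    match client.update_cluster_settings(request).sync() {
        Ok(response) => println!("cluster: {:?}", response.cluster),
        Err(RusotoError::Service(UpdateClusterSettingsError::ClusterNotFound(msg))) => {
            eprintln!("no such cluster: {}", msg)
        }
        Err(e) => eprintln!("update_cluster_settings failed: {}", e),
    }
}
```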

+ #[serde(rename = "settings")] + pub settings: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateClusterSettingsResponse { + #[serde(rename = "cluster")] + #[serde(skip_serializing_if = "Option::is_none")] + pub cluster: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateContainerAgentRequest { ///

The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. If you do not specify a cluster, the default cluster is assumed.

@@ -2794,7 +2908,7 @@ pub struct UpdateContainerAgentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateContainerAgentResponse { ///

The container instance for which the container agent was updated.

#[serde(rename = "containerInstance")] @@ -2817,7 +2931,7 @@ pub struct UpdateContainerInstancesStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateContainerInstancesStateResponse { ///

The list of container instances.

#[serde(rename = "containerInstances")] @@ -2843,7 +2957,7 @@ pub struct UpdateServicePrimaryTaskSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServicePrimaryTaskSetResponse { #[serde(rename = "taskSet")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2868,7 +2982,7 @@ pub struct UpdateServiceRequest { #[serde(rename = "forceNewDeployment")] #[serde(skip_serializing_if = "Option::is_none")] pub force_new_deployment: Option, - ///

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 1,800 seconds. During that time, the ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

+ ///

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
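A sketch of nudging these knobs on an existing service (the service and cluster names are placeholders, and it assumes the request derives `Default` like its neighbors):

```rust
use rusoto_ecs::UpdateServiceRequest;

let request = UpdateServiceRequest {
    service: "web".to_owned(), // placeholder service name
    cluster: Some("default".to_owned()),
    desired_count: Some(4),
    // Give slow-starting tasks five minutes before ELB health checks count.
    health_check_grace_period_seconds: Some(300),
    ..Default::default()
};
```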

#[serde(rename = "healthCheckGracePeriodSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub health_check_grace_period_seconds: Option, @@ -2890,7 +3004,7 @@ pub struct UpdateServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServiceResponse { ///

The full description of your service following the update call.

#[serde(rename = "service")] @@ -2914,7 +3028,7 @@ pub struct UpdateTaskSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTaskSetResponse { #[serde(rename = "taskSet")] #[serde(skip_serializing_if = "Option::is_none")] @@ -5065,6 +5179,61 @@ impl Error for UntagResourceError { } } } +/// Errors returned by UpdateClusterSettings +#[derive(Debug, PartialEq)] +pub enum UpdateClusterSettingsError { + ///

These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.

+ Client(String), + ///

The specified cluster could not be found. You can view your available clusters with ListClusters. Amazon ECS clusters are Region-specific.

+ ClusterNotFound(String), + ///

The specified parameter is invalid. Review the available parameters for the API request.

+ InvalidParameter(String), + ///

These errors are usually caused by a server issue.

+ Server(String), +} + +impl UpdateClusterSettingsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ClientException" => { + return RusotoError::Service(UpdateClusterSettingsError::Client(err.msg)) + } + "ClusterNotFoundException" => { + return RusotoError::Service(UpdateClusterSettingsError::ClusterNotFound( + err.msg, + )) + } + "InvalidParameterException" => { + return RusotoError::Service(UpdateClusterSettingsError::InvalidParameter( + err.msg, + )) + } + "ServerException" => { + return RusotoError::Service(UpdateClusterSettingsError::Server(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateClusterSettingsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateClusterSettingsError { + fn description(&self) -> &str { + match *self { + UpdateClusterSettingsError::Client(ref cause) => cause, + UpdateClusterSettingsError::ClusterNotFound(ref cause) => cause, + UpdateClusterSettingsError::InvalidParameter(ref cause) => cause, + UpdateClusterSettingsError::Server(ref cause) => cause, + } + } +} /// Errors returned by UpdateContainerAgent #[derive(Debug, PartialEq)] pub enum UpdateContainerAgentError { @@ -5472,7 +5641,7 @@ pub trait Ecs { input: CreateClusterRequest, ) -> RusotoFuture; - ///

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

+ ///

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
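Condensed into code, a minimal replica service with no load balancer might look like the sketch below (names are placeholders; it assumes the request derives `Default` like the other request structs in this file):

```rust
use rusoto_core::Region;
use rusoto_ecs::{CreateServiceRequest, Ecs, EcsClient};

fn main() {
    let client = EcsClient::new(Region::UsEast1);

    let request = CreateServiceRequest {
        service_name: "web".to_owned(), // placeholder
        task_definition: Some("web-task:1".to_owned()),
        desired_count: Some(2),
        // REPLICA is the default scheduling strategy described above.
        scheduling_strategy: Some("REPLICA".to_owned()),
        ..Default::default()
    };

    match client.create_service(request).sync() {
        Ok(response) => println!("created: {:?}", response.service),
        Err(e) => eprintln!("create_service failed: {}", e),
    }
}
```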

    fn create_service(
        &self,
        input: CreateServiceRequest,
@@ -5502,7 +5671,7 @@ pub trait Ecs {
         input: DeleteClusterRequest,
     ) -> RusotoFuture<DeleteClusterResponse, DeleteClusterError>;

-    ///

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

+ ///

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After all tasks have transitioned to either STOPPING or STOPPED status, the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.
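The two-step flow this paragraph describes, sketched end to end (scale to zero, then delete; names are placeholders and the request structs are assumed to derive `Default`):

```rust
use rusoto_core::Region;
use rusoto_ecs::{DeleteServiceRequest, Ecs, EcsClient, UpdateServiceRequest};

fn main() {
    let client = EcsClient::new(Region::UsEast1);

    // Step 1: drive the desired count to zero so the service has no tasks.
    let scale_down = UpdateServiceRequest {
        service: "web".to_owned(), // placeholder
        desired_count: Some(0),
        ..Default::default()
    };
    client.update_service(scale_down).sync().expect("scale down failed");

    // Step 2: the service can now be deleted; it will pass through
    // DRAINING before reaching INACTIVE.
    let delete = DeleteServiceRequest {
        service: "web".to_owned(),
        ..Default::default()
    };
    client.delete_service(delete).sync().expect("delete failed");
}
```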

    fn delete_service(
        &self,
        input: DeleteServiceRequest,
    ) -> RusotoFuture<DeleteServiceResponse, DeleteServiceError>;
@@ -5622,13 +5791,13 @@ pub trait Ecs {
         input: ListTasksRequest,
     ) -> RusotoFuture<ListTasksResponse, ListTasksError>;

-    ///

Modifies an account setting. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the ARN and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is changed. If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. The opt-in and opt-out account setting can be specified for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

+ ///

Modifies an account setting. Account settings are set on a per-Region basis.

If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

When containerInsights is specified, the default setting indicating whether CloudWatch Container Insights is enabled for your clusters is changed. If containerInsights is enabled, any new clusters that are created will have Container Insights enabled unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

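A short hedged sketch of the opt-in described above: enabling the long ARN format for the calling identity. The field names (`name`, `value`, `principal_arn`) are assumed from the generated `PutAccountSettingRequest` struct.

```rust
use rusoto_core::Region;
use rusoto_ecs::{Ecs, EcsClient, PutAccountSettingRequest};

fn opt_in_to_long_arns() {
    let client = EcsClient::new(Region::UsWest2);
    let request = PutAccountSettingRequest {
        name: "serviceLongArnFormat".to_string(),
        value: "enabled".to_string(),
        // Leave principal_arn unset to change the setting for the caller
        // rather than for a specific IAM user or role.
        principal_arn: None,
    };
    client.put_account_setting(request).sync().expect("request failed");
}
```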
fn put_account_setting( &self, input: PutAccountSettingRequest, ) -> RusotoFuture; - ///

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified.

+ ///

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified. Account settings are set on a per-Region basis.

fn put_account_setting_default( &self, input: PutAccountSettingDefaultRequest, @@ -5694,6 +5863,12 @@ pub trait Ecs { input: UntagResourceRequest, ) -> RusotoFuture; + ///

Modifies the settings to use for a cluster.

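A sketch of the new operation, assuming `UpdateClusterSettingsRequest` and `ClusterSetting` follow the generated shapes used elsewhere in this crate (a required cluster name plus a list of name/value settings): enabling CloudWatch Container Insights on an existing cluster.

```rust
use rusoto_core::Region;
use rusoto_ecs::{ClusterSetting, Ecs, EcsClient, UpdateClusterSettingsRequest};

fn enable_container_insights(cluster: &str) {
    let client = EcsClient::new(Region::EuWest1);
    let request = UpdateClusterSettingsRequest {
        cluster: cluster.to_string(),
        settings: vec![ClusterSetting {
            name: Some("containerInsights".to_string()),
            value: Some("enabled".to_string()),
        }],
    };
    client.update_cluster_settings(request).sync().expect("update failed");
}
```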
+ fn update_cluster_settings( + &self, + input: UpdateClusterSettingsRequest, + ) -> RusotoFuture; + ///

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.

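For completeness, a minimal sketch of triggering an agent update on a single container instance; the identifiers are placeholders and the request shape is assumed from the generated `UpdateContainerAgentRequest` struct.

```rust
use rusoto_core::Region;
use rusoto_ecs::{Ecs, EcsClient, UpdateContainerAgentRequest};

fn update_agent(cluster: &str, container_instance: &str) {
    let client = EcsClient::new(Region::UsEast1);
    let request = UpdateContainerAgentRequest {
        cluster: Some(cluster.to_string()),
        // Container instance ID or full ARN.
        container_instance: container_instance.to_string(),
    };
    client.update_container_agent(request).sync().expect("agent update failed");
}
```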
fn update_container_agent( &self, @@ -5736,10 +5911,7 @@ impl EcsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> EcsClient { - EcsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5753,10 +5925,14 @@ impl EcsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - EcsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EcsClient { + EcsClient { client, region } } } @@ -5793,7 +5969,7 @@ impl Ecs for EcsClient { }) } - ///

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

+ ///

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

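Tying the pieces above together, a hedged sketch of creating a replica service with an explicit deployment configuration (50% minimum healthy, 200% maximum, matching the worked example in the text). The service name and task definition are placeholders, and `CreateServiceRequest` is assumed to derive `Default` like the other generated request structs.

```rust
use rusoto_core::Region;
use rusoto_ecs::{CreateServiceRequest, DeploymentConfiguration, Ecs, EcsClient};

fn create_web_service() {
    let client = EcsClient::new(Region::UsEast1);
    let request = CreateServiceRequest {
        service_name: "web".to_string(),
        task_definition: Some("web-task:1".to_string()),
        desired_count: Some(4),
        scheduling_strategy: Some("REPLICA".to_string()),
        deployment_configuration: Some(DeploymentConfiguration {
            // Allow the scheduler to stop up to half of the old tasks...
            minimum_healthy_percent: Some(50),
            // ...and to run up to twice the desired count during a deployment.
            maximum_percent: Some(200),
        }),
        ..Default::default()
    };
    client.create_service(request).sync().expect("create_service failed");
}
```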
fn create_service( &self, input: CreateServiceRequest, @@ -5952,7 +6128,7 @@ impl Ecs for EcsClient { }) } - ///

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After the tasks have stopped, the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

+ ///

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After all tasks have transitioned to either STOPPING or STOPPED status, the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

fn delete_service( &self, input: DeleteServiceRequest, @@ -6574,7 +6750,7 @@ impl Ecs for EcsClient { }) } - ///

Modifies an account setting. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the ARN and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is changed. If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. The opt-in and opt-out account setting can be specified for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

+ ///

Modifies an account setting. Account settings are set on a per-Region basis.

If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

When containerInsights is specified, the default setting indicating whether CloudWatch Container Insights is enabled for your clusters is changed. If containerInsights is enabled, any new clusters that are created will have Container Insights enabled unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

fn put_account_setting( &self, input: PutAccountSettingRequest, @@ -6606,7 +6782,7 @@ impl Ecs for EcsClient { }) } - ///

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified.

+ ///

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified. Account settings are set on a per-Region basis.

fn put_account_setting_default( &self, input: PutAccountSettingDefaultRequest, @@ -6966,6 +7142,37 @@ impl Ecs for EcsClient { }) } + ///

Modifies the settings to use for a cluster.

+ fn update_cluster_settings( + &self, + input: UpdateClusterSettingsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "ecs", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AmazonEC2ContainerServiceV20141113.UpdateClusterSettings", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(UpdateClusterSettingsError::from_response(response)) + }), + ) + } + }) + } + ///

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.

fn update_container_agent( &self, diff --git a/rusoto/services/efs/Cargo.toml b/rusoto/services/efs/Cargo.toml index e5b4bbf09ef..c9b596adfa0 100644 --- a/rusoto/services/efs/Cargo.toml +++ b/rusoto/services/efs/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_efs" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/efs/README.md b/rusoto/services/efs/README.md index 804c114b4b6..5f961c7adf6 100644 --- a/rusoto/services/efs/README.md +++ b/rusoto/services/efs/README.md @@ -23,9 +23,16 @@ To use `rusoto_efs` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_efs = "0.40.0" +rusoto_efs = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/efs/src/custom/mod.rs b/rusoto/services/efs/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/efs/src/custom/mod.rs +++ b/rusoto/services/efs/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/efs/src/generated.rs b/rusoto/services/efs/src/generated.rs index f7aab6027c9..8e000b329c4 100644 --- a/rusoto/services/efs/src/generated.rs +++ b/rusoto/services/efs/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -135,7 +134,7 @@ pub struct DescribeFileSystemsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFileSystemsResponse { ///

An array of file system descriptions.

#[serde(rename = "FileSystems")] @@ -167,7 +166,7 @@ pub struct DescribeMountTargetSecurityGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMountTargetSecurityGroupsResponse { ///

An array of security groups.

#[serde(rename = "SecurityGroups")] @@ -197,7 +196,7 @@ pub struct DescribeMountTargetsRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMountTargetsResponse { ///

If the request included the Marker, the response returns that value in this field.

#[serde(rename = "Marker")] @@ -231,7 +230,7 @@ pub struct DescribeTagsRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagsResponse { ///

If the request included a Marker, the response returns that value in this field.

#[serde(rename = "Marker")] @@ -248,7 +247,7 @@ pub struct DescribeTagsResponse { ///

A description of the file system.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FileSystemDescription { ///

The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).

#[serde(rename = "CreationTime")] @@ -301,7 +300,7 @@ pub struct FileSystemDescription { ///

The latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. The value doesn't represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value represents the actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not necessarily the exact size the file system was at any instant in time.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FileSystemSize { ///

The time at which the size of data, returned in the Value field, was determined. The value is the integer number of seconds since 1970-01-01T00:00:00Z.

#[serde(rename = "Timestamp")] @@ -321,7 +320,7 @@ pub struct FileSystemSize { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LifecycleConfigurationDescription { ///

An array of lifecycle management policies. Currently, EFS supports a maximum of one policy per file system.

#[serde(rename = "LifecyclePolicies")] @@ -332,7 +331,7 @@ pub struct LifecycleConfigurationDescription { ///

Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct LifecyclePolicy { - ///

A value that indicates how long it takes to transition files to the IA storage class. Currently, the only valid value is AFTER_30_DAYS.

AFTER_30_DAYS indicates that files that have not been read from or written to for 30 days are transitioned from the Standard storage class to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.

+ ///

A value that describes the period of time that a file is not accessed, after which it transitions to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.

#[serde(rename = "TransitionToIA")] #[serde(skip_serializing_if = "Option::is_none")] pub transition_to_ia: Option, @@ -352,7 +351,7 @@ pub struct ModifyMountTargetSecurityGroupsRequest { ///

Provides a description of a mount target.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MountTargetDescription { ///

The ID of the file system for which the mount target is intended.

#[serde(rename = "FileSystemId")] @@ -1349,7 +1348,7 @@ pub trait Efs { input: ModifyMountTargetSecurityGroupsRequest, ) -> RusotoFuture<(), ModifyMountTargetSecurityGroupsError>; - ///

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

You can enable lifecycle management only for EFS file systems created after the release of EFS infrequent access.

In the request, specify the following:

  • The ID for the file system for which you are creating a lifecycle management configuration.

  • A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one "TransitionToIA": "AFTER_30_DAYS" LifecyclePolicy item.

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

+ ///

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

In the request, specify the following:

  • The ID for the file system for which you are enabling, disabling, or modifying lifecycle management.

  • A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item.

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

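A sketch of the request described above: a single `LifecyclePolicy` that transitions files to IA after 30 days. The file system ID is a placeholder; the field names follow the `LifecyclePolicy` struct shown earlier in this diff.

```rust
use rusoto_core::Region;
use rusoto_efs::{Efs, EfsClient, LifecyclePolicy, PutLifecycleConfigurationRequest};

fn enable_lifecycle_management(file_system_id: &str) {
    let client = EfsClient::new(Region::UsEast1);
    let request = PutLifecycleConfigurationRequest {
        file_system_id: file_system_id.to_string(),
        // An empty Vec here would instead delete any existing
        // LifecycleConfiguration and disable lifecycle management.
        lifecycle_policies: vec![LifecyclePolicy {
            transition_to_ia: Some("AFTER_30_DAYS".to_string()),
        }],
    };
    client.put_lifecycle_configuration(request).sync().expect("request failed");
}
```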
fn put_lifecycle_configuration( &self, input: PutLifecycleConfigurationRequest, @@ -1373,10 +1372,7 @@ impl EfsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> EfsClient { - EfsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1390,10 +1386,14 @@ impl EfsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - EfsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EfsClient { + EfsClient { client, region } } } @@ -1821,7 +1821,7 @@ impl Efs for EfsClient { }) } - ///

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

You can enable lifecycle management only for EFS file systems created after the release of EFS infrequent access.

In the request, specify the following:

  • The ID for the file system for which you are creating a lifecycle management configuration.

  • A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one "TransitionToIA": "AFTER_30_DAYS" LifecyclePolicy item.

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

+ ///

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

In the request, specify the following:

  • The ID for the file system for which you are enabling, disabling, or modifying lifecycle management.

  • A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item.

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

fn put_lifecycle_configuration( &self, input: PutLifecycleConfigurationRequest, diff --git a/rusoto/services/eks/Cargo.toml b/rusoto/services/eks/Cargo.toml index e968872adfe..75ff530047e 100644 --- a/rusoto/services/eks/Cargo.toml +++ b/rusoto/services/eks/Cargo.toml @@ -1,13 +1,13 @@ [package] authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] -description = "AWS SDK for Rust - Amazon Elastic Container Service for Kubernetes @ 2017-11-01" +description = "AWS SDK for Rust - Amazon Elastic Kubernetes Service @ 2017-11-01" documentation = "https://docs.rs/rusoto_eks" keywords = ["AWS", "Amazon", "eks"] license = "MIT" name = "rusoto_eks" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/eks/README.md b/rusoto/services/eks/README.md index 14163e6de6a..26fd7a92262 100644 --- a/rusoto/services/eks/README.md +++ b/rusoto/services/eks/README.md @@ -1,6 +1,6 @@ # Rusoto Eks -Rust SDK for Amazon Elastic Container Service for Kubernetes +Rust SDK for Amazon Elastic Kubernetes Service You may be looking for: @@ -23,9 +23,16 @@ To use `rusoto_eks` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_eks = "0.40.0" +rusoto_eks = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/eks/src/custom/mod.rs b/rusoto/services/eks/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/eks/src/custom/mod.rs +++ b/rusoto/services/eks/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/eks/src/generated.rs b/rusoto/services/eks/src/generated.rs index 8aa8ba4e553..1fed2a08aae 100644 --- a/rusoto/services/eks/src/generated.rs +++ b/rusoto/services/eks/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest; use serde_json; ///

An object representing the certificate-authority-data for your cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Certificate { ///

The Base64-encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.

#[serde(rename = "data")] @@ -37,7 +36,7 @@ pub struct Certificate { ///

An object representing an Amazon EKS cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Cluster { ///

The Amazon Resource Name (ARN) of the cluster.

#[serde(rename = "arn")] @@ -59,6 +58,10 @@ pub struct Cluster { #[serde(rename = "endpoint")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint: Option, + ///

The identity provider information for the cluster.

+ #[serde(rename = "identity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity: Option, ///

The logging configuration for your cluster.

#[serde(rename = "logging")] #[serde(skip_serializing_if = "Option::is_none")] @@ -83,6 +86,10 @@ pub struct Cluster { #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, + ///

The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

The Kubernetes server version for the cluster.

#[serde(rename = "version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -108,6 +115,10 @@ pub struct CreateClusterRequest { ///

The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide.

#[serde(rename = "roleArn")] pub role_arn: String, + ///

The metadata to apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///

The desired Kubernetes version for your cluster. If you don't specify a value here, the latest version available in Amazon EKS is used.

#[serde(rename = "version")] #[serde(skip_serializing_if = "Option::is_none")] @@ -115,7 +126,7 @@ pub struct CreateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResponse { ///

The full description of your new cluster.

#[serde(rename = "cluster")] @@ -131,7 +142,7 @@ pub struct DeleteClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClusterResponse { ///

The full description of the cluster to delete.

#[serde(rename = "cluster")] @@ -147,7 +158,7 @@ pub struct DescribeClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClusterResponse { ///

The full description of your specified cluster.

#[serde(rename = "cluster")] @@ -166,7 +177,7 @@ pub struct DescribeUpdateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUpdateResponse { ///

The full description of the specified update.

#[serde(rename = "update")] @@ -176,7 +187,7 @@ pub struct DescribeUpdateResponse { ///

An object representing an error when an asynchronous operation fails.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorDetail { ///

A brief description of the error.

  • SubnetNotFound: We couldn't find one of the subnets associated with the cluster.

  • SecurityGroupNotFound: We couldn't find one of the security groups associated with the cluster.

  • EniLimitReached: You have reached the elastic network interface limit for your account.

  • IpNotAvailable: A subnet associated with the cluster doesn't have any free IP addresses.

  • AccessDenied: You don't have permissions to perform the specified operation.

  • OperationNotPermitted: The service role associated with the cluster doesn't have the required access permissions for Amazon EKS.

  • VpcIdNotFound: We couldn't find the VPC associated with the cluster.

#[serde(rename = "errorCode")] @@ -192,6 +203,16 @@ pub struct ErrorDetail { pub resource_ids: Option>, } +///

An object representing an identity provider for authentication credentials.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Identity { + ///

The OpenID Connect identity provider information for the cluster.

+ #[serde(rename = "oidc")] + #[serde(skip_serializing_if = "Option::is_none")] + pub oidc: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListClustersRequest { ///

The maximum number of cluster results returned by ListClusters in paginated output. When you use this parameter, ListClusters returns only maxResults results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListClusters returns up to 100 results and a nextToken value if applicable.

@@ -205,7 +226,7 @@ pub struct ListClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClustersResponse { ///

A list of all of the clusters for your account in the specified Region.

#[serde(rename = "clusters")] @@ -217,6 +238,22 @@ pub struct ListClustersResponse { pub next_token: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceRequest { + ///

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceResponse { + ///

The tags for the resource.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListUpdatesRequest { ///

The maximum number of update results returned by ListUpdates in paginated output. When you use this parameter, ListUpdates returns only maxResults results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListUpdates request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListUpdates returns up to 100 results and a nextToken value if applicable.

@@ -233,7 +270,7 @@ pub struct ListUpdatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUpdatesResponse { ///

The nextToken value to include in a future ListUpdates request. When the results of a ListUpdates request exceed maxResults, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -267,9 +304,47 @@ pub struct Logging { pub cluster_logging: Option>, } +///

An object representing the OpenID Connect identity provider information for the cluster.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct OIDC { + ///

The issuer URL for the OpenID Connect identity provider.

+ #[serde(rename = "issuer")] + #[serde(skip_serializing_if = "Option::is_none")] + pub issuer: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + ///

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, + ///

The tags to add to the resource. A tag is an array of key-value pairs.

+ #[serde(rename = "tags")] + pub tags: ::std::collections::HashMap, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + ///

The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, + ///

The keys of the tags to be removed.

+ #[serde(rename = "tagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceResponse {} + ///

An object representing an asynchronous update.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Update { ///

The Unix epoch timestamp in seconds for when the update was created.

#[serde(rename = "createdAt")] @@ -316,7 +391,7 @@ pub struct UpdateClusterConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateClusterConfigResponse { #[serde(rename = "update")] #[serde(skip_serializing_if = "Option::is_none")] @@ -338,7 +413,7 @@ pub struct UpdateClusterVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateClusterVersionResponse { ///

The full description of the specified update.

#[serde(rename = "update")] @@ -348,7 +423,7 @@ pub struct UpdateClusterVersionResponse { ///

An object representing the details of an update request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateParam { ///

The keys associated with an update request.

#[serde(rename = "type")] @@ -383,7 +458,7 @@ pub struct VpcConfigRequest { ///

An object representing an Amazon EKS cluster VPC configuration response.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VpcConfigResponse { ///

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet.

#[serde(rename = "endpointPrivateAccess")] @@ -688,6 +763,45 @@ impl Error for ListClustersError { } } } +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + ///

This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.

+ BadRequest(String), + ///

A service resource associated with the request could not be found. Clients should not retry such requests.

+ NotFound(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(ListTagsForResourceError::NotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::BadRequest(ref cause) => cause, + ListTagsForResourceError::NotFound(ref cause) => cause, + } + } +} /// Errors returned by ListUpdates #[derive(Debug, PartialEq)] pub enum ListUpdatesError { @@ -739,6 +853,84 @@ impl Error for ListUpdatesError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.

+ BadRequest(String), + ///

A service resource associated with the request could not be found. Clients should not retry such requests.

+ NotFound(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(TagResourceError::BadRequest(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(TagResourceError::NotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::BadRequest(ref cause) => cause, + TagResourceError::NotFound(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.

+ BadRequest(String), + ///

A service resource associated with the request could not be found. Clients should not retry such requests.

+ NotFound(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UntagResourceError::BadRequest(err.msg)) + } + "NotFoundException" => { + return RusotoError::Service(UntagResourceError::NotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::BadRequest(ref cause) => cause, + UntagResourceError::NotFound(ref cause) => cause, + } + } +} /// Errors returned by UpdateClusterConfig #[derive(Debug, PartialEq)] pub enum UpdateClusterConfigError { @@ -905,12 +1097,30 @@ pub trait Eks { input: ListClustersRequest, ) -> RusotoFuture; + ///

Lists the tags for an Amazon EKS resource.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture; + ///

Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.

fn list_updates( &self, input: ListUpdatesRequest, ) -> RusotoFuture; + ///

Associates the specified tags with the resource identified by the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.

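A brief sketch of tagging an EKS cluster by ARN, using the `TagResourceRequest` shape added in this diff (a required `resource_arn` plus a tag map); the ARN and tag values are placeholders.

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_eks::{Eks, EksClient, TagResourceRequest};

fn tag_cluster(cluster_arn: &str) {
    let client = EksClient::new(Region::UsWest2);
    let mut tags = HashMap::new();
    tags.insert("team".to_string(), "platform".to_string());

    let request = TagResourceRequest {
        resource_arn: cluster_arn.to_string(),
        tags,
    };
    client.tag_resource(request).sync().expect("tagging failed");
}
```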
+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture; + + ///

Deletes specified tags from a resource.

+ fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture; + ///

     /// Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
     ///
     /// You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide.
     ///
     /// CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.
     ///
     /// You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide.
     ///
     /// At this time, you cannot update the subnets or security group IDs for an existing cluster.
     ///
     /// Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

     fn update_cluster_config(
         &self,
@@ -935,10 +1145,7 @@ impl EksClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> EksClient {
-        EksClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with<P, D>(
@@ -952,10 +1159,14 @@ impl EksClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        EksClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> EksClient {
+        EksClient { client, region }
     }
 }

@@ -1121,6 +1332,34 @@ impl Eks for EksClient {
         })
     }
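A sketch of the new constructor path added in this refactor, which makes the other two constructors thin wrappers:

```rust
use rusoto_core::{Client, Region};
use rusoto_eks::EksClient;

fn main() {
    // Build the underlying dispatcher/credentials bundle once...
    let shared = Client::shared();
    // ...and hand it to the service client; `EksClient::new(region)` is now
    // just a thin wrapper over this constructor.
    let eks = EksClient::new_with_client(shared, Region::UsWest2);
    let _ = eks;
}
```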

+    /// List the tags for an Amazon EKS resource.
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("GET", "eks", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListTagsForResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListTagsForResourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
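Calling this GET /tags/{resource_arn} operation looks roughly like the sketch below; the blocking `.sync()` call and optional response map are assumptions consistent with rusoto's generated shapes:

```rust
use rusoto_core::Region;
use rusoto_eks::{Eks, EksClient, ListTagsForResourceRequest};

fn print_tags(arn: &str) {
    let client = EksClient::new(Region::UsEast1);
    let resp = client
        .list_tags_for_resource(ListTagsForResourceRequest {
            resource_arn: arn.to_string(),
        })
        .sync()
        .expect("list_tags_for_resource failed");
    // The response carries an optional map of key/value pairs.
    for (key, value) in resp.tags.unwrap_or_default() {
        println!("{}={}", key, value);
    }
}
```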

     /// Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.
     fn list_updates(
         &self,
@@ -1159,6 +1398,73 @@ impl Eks for EksClient {
         })
     }

+    /// Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.
+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("POST", "eks", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// Deletes specified tags from a resource.
+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("DELETE", "eks", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        for item in input.tag_keys.iter() {
+            params.put("tagKeys", item);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UntagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }

     /// Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
     ///
     /// You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide.
     ///
     /// CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.
     ///
     /// You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide.
     ///
     /// At this time, you cannot update the subnets or security group IDs for an existing cluster.
     ///
     /// Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.
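A hedged sketch of the log-export use case described above. The `Logging`/`LogSetup` shapes and the `"api"` log type mirror the EKS API but are assumptions as far as this diff is concerned, as is the `Default` derive on the request struct:

```rust
use rusoto_core::Region;
use rusoto_eks::{Eks, EksClient, LogSetup, Logging, UpdateClusterConfigRequest};

fn enable_api_logs(cluster_name: &str) {
    let client = EksClient::new(Region::UsEast1);
    let response = client
        .update_cluster_config(UpdateClusterConfigRequest {
            name: cluster_name.to_string(),
            logging: Some(Logging {
                cluster_logging: Some(vec![LogSetup {
                    enabled: Some(true),
                    types: Some(vec!["api".to_string()]),
                }]),
            }),
            ..Default::default()
        })
        .sync()
        .expect("update_cluster_config failed");
    // Track progress with DescribeUpdate using the returned update id.
    println!("update id: {:?}", response.update.and_then(|u| u.id));
}
```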

     fn update_cluster_config(
         &self,
diff --git a/rusoto/services/eks/src/lib.rs b/rusoto/services/eks/src/lib.rs
index faeb983f856..c7a63becdf3 100644
--- a/rusoto/services/eks/src/lib.rs
+++ b/rusoto/services/eks/src/lib.rs
@@ -12,7 +12,7 @@
 // =================================================================

 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//! Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.
-//!
-//! Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.
+//! Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.
+//!
+//! Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.
 //!
 //! If you're using the service, you're probably looking for [EksClient](struct.EksClient.html) and [Eks](trait.Eks.html).
diff --git a/rusoto/services/elasticache/Cargo.toml b/rusoto/services/elasticache/Cargo.toml
index cac636e6d7f..8b42e03fd2d 100644
--- a/rusoto/services/elasticache/Cargo.toml
+++ b/rusoto/services/elasticache/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_elasticache"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -17,18 +17,20 @@ exclude = ["test_resources/*"]
 [dependencies]
 bytes = "0.4.12"
 futures = "0.1.16"
-serde_urlencoded = "0.5"
-xml-rs = "0.7"
+serde_urlencoded = "0.6"
+xml-rs = "0.8"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/elasticache/README.md b/rusoto/services/elasticache/README.md
index 3af0c89994c..a1dc5533b70 100644
--- a/rusoto/services/elasticache/README.md
+++ b/rusoto/services/elasticache/README.md
@@ -23,9 +23,16 @@ To use `rusoto_elasticache` in your application, add it as a dependency in your
 ```toml
 [dependencies]
-rusoto_elasticache = "0.40.0"
+rusoto_elasticache = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/elasticache/src/custom/mod.rs b/rusoto/services/elasticache/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/elasticache/src/custom/mod.rs
+++ b/rusoto/services/elasticache/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/elasticache/src/generated.rs b/rusoto/services/elasticache/src/generated.rs
index 2c601de7407..3f1c7231a94 100644
--- a/rusoto/services/elasticache/src/generated.rs
+++ b/rusoto/services/elasticache/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
@@ -71,6 +70,8 @@ impl AllowedNodeGroupIdDeserializer {
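A hedged sketch of the `serialize_structs` feature advertised in the README hunk above: with `rusoto_elasticache = { version = "0.41.0", features = ["serialize_structs"] }` in Cargo.toml and a `serde_json` dependency, output structs such as `CacheCluster` can be dumped as JSON. Exact coverage of the derive is an assumption:

```rust
use rusoto_elasticache::CacheCluster;

// With the feature enabled, output structs derive serde::Serialize,
// so they can be logged or persisted as JSON.
fn dump(cluster: &CacheCluster) -> serde_json::Result<String> {
    serde_json::to_string_pretty(cluster)
}
```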

 /// Represents the allowed node types you can use to modify your cluster or replication group.
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct AllowedNodeTypeModificationsMessage {
+    /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.
+    ///
+    /// When scaling down on a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.
+    pub scale_down_modifications: Option<Vec<String>>,
     /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.
     ///
     /// When scaling up a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.
     pub scale_up_modifications: Option<Vec<String>>,
 }
@@ -87,6 +88,11 @@ impl AllowedNodeTypeModificationsMessageDeserializer {
             stack,
             |name, stack, obj| {
                 match name {
+                    "ScaleDownModifications" => {
+                        obj.scale_down_modifications.get_or_insert(vec![]).extend(
+                            NodeTypeListDeserializer::deserialize("ScaleDownModifications", stack)?,
+                        );
+                    }
                     "ScaleUpModifications" => {
                         obj.scale_up_modifications.get_or_insert(vec![]).extend(
                             NodeTypeListDeserializer::deserialize("ScaleUpModifications", stack)?,
@@ -313,7 +319,7 @@ impl BooleanOptionalDeserializer {
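A sketch of reading the new `ScaleDownModifications` list alongside the existing scale-up list. The `list_allowed_node_type_modifications` method and its message type follow rusoto_elasticache's generated naming and are otherwise assumptions:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{
    ElastiCache, ElastiCacheClient, ListAllowedNodeTypeModificationsMessage,
};

fn show_modifications(cluster_id: &str) {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let allowed = client
        .list_allowed_node_type_modifications(ListAllowedNodeTypeModificationsMessage {
            cache_cluster_id: Some(cluster_id.to_string()),
            ..Default::default()
        })
        .sync()
        .expect("list_allowed_node_type_modifications failed");
    println!("scale up:   {:?}", allowed.scale_up_modifications);
    println!("scale down: {:?}", allowed.scale_down_modifications);
}
```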

Contains all of the attributes of a specific cluster.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CacheCluster { - ///

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

+ ///

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

pub at_rest_encryption_enabled: Option, ///

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

pub auth_token_enabled: Option, @@ -360,7 +366,7 @@ pub struct CacheCluster { pub snapshot_retention_limit: Option, ///

The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster.

Example: 05:00-09:00

pub snapshot_window: Option, - ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

+ ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

pub transit_encryption_enabled: Option, } @@ -1533,6 +1539,8 @@ impl ConfigureShardSerializer { ///

 /// Represents the input of a CopySnapshotMessage operation.
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct CopySnapshotMessage {
+    /// The ID of the KMS key used to encrypt the target snapshot.
+    pub kms_key_id: Option<String>,
     /// The name of an existing snapshot from which to make a copy.
     pub source_snapshot_name: String,
     /// The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.
     ///
     /// When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.
     ///
     /// For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.
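A sketch of the new `KmsKeyId` parameter on CopySnapshot: copying a snapshot and encrypting the target with a customer-managed key. The key id and snapshot names are placeholders, and the `Default` derive on the message is taken from the struct definition above:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{CopySnapshotMessage, ElastiCache, ElastiCacheClient};

fn copy_encrypted() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let msg = CopySnapshotMessage {
        source_snapshot_name: "nightly-snapshot".to_string(),
        target_snapshot_name: "nightly-snapshot-copy".to_string(),
        // New in this release: encrypt the copy with a specific KMS key.
        kms_key_id: Some("1234abcd-12ab-34cd-56ef-1234567890ab".to_string()),
        ..Default::default()
    };
    client.copy_snapshot(msg).sync().expect("copy_snapshot failed");
}
```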

@@ -1550,6 +1558,9 @@ impl CopySnapshotMessageSerializer {
             prefix.push_str(".");
         }

+        if let Some(ref field_value) = obj.kms_key_id {
+            params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value);
+        }
         params.put(
             &format!("{}{}", prefix, "SourceSnapshotName"),
             &obj.source_snapshot_name,
@@ -1596,7 +1607,7 @@ pub struct CreateCacheClusterMessage {
     pub auth_token: Option<String>,

This parameter is currently disabled.

pub auto_minor_version_upgrade: Option, - ///

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

  • A name must contain from 1 to 20 alphanumeric characters or hyphens.

  • The first character must be a letter.

  • A name cannot end with a hyphen or contain two consecutive hyphens.

+ ///

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

  • A name must contain from 1 to 50 alphanumeric characters or hyphens.

  • The first character must be a letter.

  • A name cannot end with a hyphen or contain two consecutive hyphens.

pub cache_cluster_id: String, ///

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

pub cache_node_type: Option, @@ -1976,7 +1987,7 @@ impl CreateCacheSubnetGroupResultDeserializer { ///

Represents the input of a CreateReplicationGroup operation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct CreateReplicationGroupMessage { - ///

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

+ ///

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

pub at_rest_encryption_enabled: Option, ///

Reserved parameter. The password used to access a password protected server.

AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true.

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

Password constraints:

  • Must be only printable ASCII characters.

  • Must be at least 16 characters and no more than 128 characters in length.

  • Cannot contain any of the following characters: '/', '"', or '@'.

For more information, see AUTH password at http://redis.io/commands/AUTH.

pub auth_token: Option, @@ -1996,6 +2007,8 @@ pub struct CreateReplicationGroupMessage { pub engine: Option, ///

The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

     pub engine_version: Option<String>,
+    /// The ID of the KMS key used to encrypt the disk on the cluster.
+    pub kms_key_id: Option<String>,

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file, you must configure each node group (shard) using this parameter because you must specify the slots for each node group.

pub node_group_configuration: Option>, ///

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.

The Amazon SNS topic owner must be the same as the cluster owner.

@@ -2016,7 +2029,7 @@ pub struct CreateReplicationGroupMessage { pub replicas_per_node_group: Option, ///

A user-created description for the replication group.

pub replication_group_description: String, - ///

The replication group identifier. This parameter is stored as a lowercase string.

Constraints:

  • A name must contain from 1 to 20 alphanumeric characters or hyphens.

  • The first character must be a letter.

  • A name cannot end with a hyphen or contain two consecutive hyphens.

+ ///

The replication group identifier. This parameter is stored as a lowercase string.

Constraints:

  • A name must contain from 1 to 40 alphanumeric characters or hyphens.

  • The first character must be a letter.

  • A name cannot end with a hyphen or contain two consecutive hyphens.

pub replication_group_id: String, ///

One or more Amazon VPC security groups associated with this replication group.

Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).

pub security_group_ids: Option>, @@ -2030,7 +2043,7 @@ pub struct CreateReplicationGroupMessage { pub snapshot_window: Option, ///

A list of cost allocation tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue.

pub tags: Option>, - ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6 or 4.x, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

+ ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

pub transit_encryption_enabled: Option, } @@ -2092,6 +2105,9 @@ impl CreateReplicationGroupMessageSerializer { if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } + if let Some(ref field_value) = obj.kms_key_id { + params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); + } if let Some(ref field_value) = obj.node_group_configuration { NodeGroupConfigurationListSerializer::serialize( params, @@ -2217,6 +2233,8 @@ impl CreateReplicationGroupResultDeserializer { pub struct CreateSnapshotMessage { ///

     /// The identifier of an existing cluster. The snapshot is created from this cluster.
     pub cache_cluster_id: Option<String>,
+    /// The ID of the KMS key used to encrypt the snapshot.
+    pub kms_key_id: Option<String>,
     /// The identifier of an existing replication group. The snapshot is created from this replication group.
     pub replication_group_id: Option<String>,
     /// A name for the snapshot being created.
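CreateSnapshot gains the same `KmsKeyId` field; a hedged usage sketch with placeholder identifiers follows:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{CreateSnapshotMessage, ElastiCache, ElastiCacheClient};

fn snapshot_with_kms() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let msg = CreateSnapshotMessage {
        replication_group_id: Some("my-redis-group".to_string()),
        snapshot_name: "my-redis-backup".to_string(),
        // New in this release: encrypt the snapshot with a specific KMS key.
        kms_key_id: Some("1234abcd-12ab-34cd-56ef-1234567890ab".to_string()),
        ..Default::default()
    };
    client.create_snapshot(msg).sync().expect("create_snapshot failed");
}
```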

@@ -2235,6 +2253,9 @@ impl CreateSnapshotMessageSerializer {
         if let Some(ref field_value) = obj.cache_cluster_id {
             params.put(&format!("{}{}", prefix, "CacheClusterId"), &field_value);
         }
+        if let Some(ref field_value) = obj.kms_key_id {
+            params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value);
+        }
         if let Some(ref field_value) = obj.replication_group_id {
             params.put(&format!("{}{}", prefix, "ReplicationGroupId"), &field_value);
         }
@@ -3699,7 +3720,7 @@ impl ListTagsForResourceMessageSerializer {

Represents the input of a ModifyCacheCluster operation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyCacheClusterMessage { - ///

Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.

Valid values: single-az | cross-az.

This option is only supported for Memcached clusters.

You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

Only newly created nodes are located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

+ ///

Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.

Valid values: single-az | cross-az.

This option is only supported for Memcached clusters.

You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

Only newly created nodes are located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

pub az_mode: Option, ///

If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster.

If false, changes to the cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

If you perform a ModifyCacheCluster before a pending modification is applied, the pending modification is replaced by the newer modification.

Valid values: true | false

Default: false

pub apply_immediately: Option, @@ -3717,7 +3738,7 @@ pub struct ModifyCacheClusterMessage { pub cache_security_group_names: Option>, ///

The upgraded version of the cache engine to be run on the cache nodes.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.

pub engine_version: Option, - ///

The list of Availability Zones where the new Memcached cache nodes are created.

This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

This option is only supported on Memcached clusters.

Scenarios:

  • Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.

  • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.

  • Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.

The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

Impact of new add/remove requests upon pending requests

  • Scenario-1

    • Pending Action: Delete

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending delete.

  • Scenario-2

    • Pending Action: Delete

    • New Request: Create

    • Result: The new create, pending or immediate, replaces the pending delete.

  • Scenario-3

    • Pending Action: Create

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending create.

  • Scenario-4

    • Pending Action: Create

    • New Request: Create

    • Result: The new create is added to the pending create.

      Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

+ ///

The list of Availability Zones where the new Memcached cache nodes are created.

This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

This option is only supported on Memcached clusters.

Scenarios:

  • Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.

  • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.

  • Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.

The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

Impact of new add/remove requests upon pending requests

  • Scenario-1

    • Pending Action: Delete

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending delete.

  • Scenario-2

    • Pending Action: Delete

    • New Request: Create

    • Result: The new create, pending or immediate, replaces the pending delete.

  • Scenario-3

    • Pending Action: Create

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending create.

  • Scenario-4

    • Pending Action: Create

    • New Request: Create

    • Result: The new create is added to the pending create.

      Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

pub new_availability_zones: Option>, ///

The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.

The Amazon SNS topic owner must be the same as the cluster owner.

pub notification_topic_arn: Option, @@ -4231,12 +4252,14 @@ impl ModifyReplicationGroupShardConfigurationResultDeserializer { ///

Represents a collection of cache nodes in a replication group. One node in the node group is the read/write primary node. All the other nodes are read-only Replica nodes.

 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct NodeGroup {
-    /// The identifier for the node group (shard). A Redis (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster mode enabled) replication group contains 1 to 15 node groups numbered 0001 to 0015. Optionally, the user can provide the id for a node group.
+    /// The identifier for the node group (shard). A Redis (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group.
     pub node_group_id: Option<String>,
     /// A list containing information about individual nodes within the node group (shard).
     pub node_group_members: Option<Vec<NodeGroupMember>>,
     /// The endpoint of the primary node in this node group (shard).
     pub primary_endpoint: Option<Endpoint>,
+    /// The endpoint of the replica nodes in this node group (shard).
+    pub reader_endpoint: Option<Endpoint>,
     /// The keyspace for this node group (shard).
     pub slots: Option<String>,
     /// The current state of this replication group - creating, available, etc.
@@ -4265,6 +4288,10 @@
                     obj.primary_endpoint =
                         Some(EndpointDeserializer::deserialize("PrimaryEndpoint", stack)?);
                 }
+                "ReaderEndpoint" => {
+                    obj.reader_endpoint =
+                        Some(EndpointDeserializer::deserialize("ReaderEndpoint", stack)?);
+                }
                 "Slots" => {
                     obj.slots = Some(StringDeserializer::deserialize("Slots", stack)?);
                 }
@@ -5310,7 +5337,7 @@ impl ReplicaConfigurationListSerializer {
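A sketch of surfacing the new `ReaderEndpoint` added to NodeGroup above, via DescribeReplicationGroups. Message and response field names follow rusoto_elasticache's generated shapes and are otherwise assumptions:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{DescribeReplicationGroupsMessage, ElastiCache, ElastiCacheClient};

fn print_reader_endpoints(group_id: &str) {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let groups = client
        .describe_replication_groups(DescribeReplicationGroupsMessage {
            replication_group_id: Some(group_id.to_string()),
            ..Default::default()
        })
        .sync()
        .expect("describe_replication_groups failed");
    for group in groups.replication_groups.unwrap_or_default() {
        for node_group in group.node_groups.unwrap_or_default() {
            if let Some(reader) = node_group.reader_endpoint {
                println!("{:?}:{:?}", reader.address, reader.port);
            }
        }
    }
}
```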

Contains all of the attributes of a specific Redis replication group.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ReplicationGroup { - ///

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

+ ///

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

pub at_rest_encryption_enabled: Option, ///

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

pub auth_token_enabled: Option, @@ -5324,6 +5351,8 @@ pub struct ReplicationGroup { pub configuration_endpoint: Option, ///

The user supplied description of the replication group.

pub description: Option, + ///

The ID of the KMS key used to encrypt the disk in the cluster.

+ pub kms_key_id: Option, ///

The names of all the cache clusters that are part of this replication group.

pub member_clusters: Option>, ///

A list of node groups in this replication group. For Redis (cluster mode disabled) replication groups, this is a single-element list. For Redis (cluster mode enabled) replication groups, the list contains an entry for each node group (shard).

@@ -5340,7 +5369,7 @@ pub struct ReplicationGroup { pub snapshotting_cluster_id: Option, ///

The current state of this replication group - creating, available, modifying, deleting, create-failed, snapshotting.

pub status: Option, - ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

+ ///

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

pub transit_encryption_enabled: Option, } @@ -5390,6 +5419,9 @@ impl ReplicationGroupDeserializer { "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } + "KmsKeyId" => { + obj.kms_key_id = Some(StringDeserializer::deserialize("KmsKeyId", stack)?); + } "MemberClusters" => { obj.member_clusters.get_or_insert(vec![]).extend( ClusterIdListDeserializer::deserialize("MemberClusters", stack)?, @@ -6367,6 +6399,8 @@ pub struct Snapshot { pub engine: Option, ///

The version of the cache engine version that is used by the source cluster.

pub engine_version: Option, + ///

The ID of the KMS key used to encrypt the snapshot.

+ pub kms_key_id: Option, ///

A list of the cache nodes in the source cluster.

pub node_snapshots: Option>, ///

The number of cache nodes in the source cluster.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

@@ -6454,6 +6488,9 @@ impl SnapshotDeserializer { obj.engine_version = Some(StringDeserializer::deserialize("EngineVersion", stack)?); } + "KmsKeyId" => { + obj.kms_key_id = Some(StringDeserializer::deserialize("KmsKeyId", stack)?); + } "NodeSnapshots" => { obj.node_snapshots.get_or_insert(vec![]).extend( NodeSnapshotListDeserializer::deserialize("NodeSnapshots", stack)?, @@ -7940,7 +7977,7 @@ pub enum CreateReplicationGroupError { InvalidParameterValue(String), ///

The VPC network is in an invalid state.

InvalidVPCNetworkStateFault(String), - ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 15

+ ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 90

NodeGroupsPerReplicationGroupQuotaExceededFault(String), ///

The request cannot be processed because it would exceed the allowed number of cache nodes in a single cluster.

NodeQuotaForClusterExceededFault(String), @@ -8141,7 +8178,7 @@ pub enum DecreaseReplicaCountError { InvalidVPCNetworkStateFault(String), ///

The operation was not performed because no changes were required.

NoOperationFault(String), - ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 15

+ ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 90

NodeGroupsPerReplicationGroupQuotaExceededFault(String), ///

The request cannot be processed because it would exceed the allowed number of cache nodes per customer.

NodeQuotaForCustomerExceededFault(String), @@ -9667,6 +9704,8 @@ pub enum IncreaseReplicaCountError { InsufficientCacheClusterCapacityFault(String), ///

The requested cluster is not in the available state.

InvalidCacheClusterStateFault(String), + ///

The KMS key supplied is not valid.

+ InvalidKMSKeyFault(String), ///

Two or more incompatible parameters were specified.

InvalidParameterCombination(String), ///

The value for a parameter is invalid.

@@ -9677,7 +9716,7 @@ pub enum IncreaseReplicaCountError { InvalidVPCNetworkStateFault(String), ///

The operation was not performed because no changes were required.

NoOperationFault(String), - ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 15

+ ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 90

NodeGroupsPerReplicationGroupQuotaExceededFault(String), ///

The request cannot be processed because it would exceed the allowed number of cache nodes per customer.

NodeQuotaForCustomerExceededFault(String), @@ -9714,6 +9753,11 @@ impl IncreaseReplicaCountError { ), ) } + "InvalidKMSKeyFault" => { + return RusotoError::Service(IncreaseReplicaCountError::InvalidKMSKeyFault( + parsed_error.message, + )) + } "InvalidParameterCombination" => { return RusotoError::Service( IncreaseReplicaCountError::InvalidParameterCombination( @@ -9790,6 +9834,7 @@ impl Error for IncreaseReplicaCountError { IncreaseReplicaCountError::ClusterQuotaForCustomerExceededFault(ref cause) => cause, IncreaseReplicaCountError::InsufficientCacheClusterCapacityFault(ref cause) => cause, IncreaseReplicaCountError::InvalidCacheClusterStateFault(ref cause) => cause, + IncreaseReplicaCountError::InvalidKMSKeyFault(ref cause) => cause, IncreaseReplicaCountError::InvalidParameterCombination(ref cause) => cause, IncreaseReplicaCountError::InvalidParameterValue(ref cause) => cause, IncreaseReplicaCountError::InvalidReplicationGroupStateFault(ref cause) => cause, @@ -10269,6 +10314,8 @@ pub enum ModifyReplicationGroupError { InvalidCacheClusterStateFault(String), ///
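The new `InvalidKMSKeyFault` variant added here to `IncreaseReplicaCountError` (and to several operations below) can be matched like any other service error. A hedged sketch; the message fields and their optionality are assumptions about the generated shapes:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_elasticache::{
    ElastiCache, ElastiCacheClient, IncreaseReplicaCountError, IncreaseReplicaCountMessage,
};

fn add_replica(group_id: &str) {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let msg = IncreaseReplicaCountMessage {
        replication_group_id: group_id.to_string(),
        new_replica_count: Some(2),
        apply_immediately: true,
        ..Default::default()
    };
    match client.increase_replica_count(msg).sync() {
        Ok(_) => println!("replica count increase started"),
        Err(RusotoError::Service(IncreaseReplicaCountError::InvalidKMSKeyFault(cause))) => {
            eprintln!("KMS key problem: {}", cause);
        }
        Err(other) => eprintln!("request failed: {}", other),
    }
}
```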

The current state of the cache security group does not allow deletion.

InvalidCacheSecurityGroupStateFault(String), + ///

The KMS key supplied is not valid.

+ InvalidKMSKeyFault(String), ///

Two or more incompatible parameters were specified.

InvalidParameterCombination(String), ///

The value for a parameter is invalid.

@@ -10335,6 +10382,11 @@ impl ModifyReplicationGroupError { ), ) } + "InvalidKMSKeyFault" => { + return RusotoError::Service( + ModifyReplicationGroupError::InvalidKMSKeyFault(parsed_error.message), + ) + } "InvalidParameterCombination" => { return RusotoError::Service( ModifyReplicationGroupError::InvalidParameterCombination( @@ -10413,6 +10465,7 @@ impl Error for ModifyReplicationGroupError { ModifyReplicationGroupError::InsufficientCacheClusterCapacityFault(ref cause) => cause, ModifyReplicationGroupError::InvalidCacheClusterStateFault(ref cause) => cause, ModifyReplicationGroupError::InvalidCacheSecurityGroupStateFault(ref cause) => cause, + ModifyReplicationGroupError::InvalidKMSKeyFault(ref cause) => cause, ModifyReplicationGroupError::InvalidParameterCombination(ref cause) => cause, ModifyReplicationGroupError::InvalidParameterValue(ref cause) => cause, ModifyReplicationGroupError::InvalidReplicationGroupStateFault(ref cause) => cause, @@ -10430,6 +10483,8 @@ pub enum ModifyReplicationGroupShardConfigurationError { InsufficientCacheClusterCapacityFault(String), ///

The requested cluster is not in the available state.

InvalidCacheClusterStateFault(String), + ///

The KMS key supplied is not valid.

+ InvalidKMSKeyFault(String), ///

Two or more incompatible parameters were specified.

InvalidParameterCombination(String), ///

The value for a parameter is invalid.

@@ -10438,7 +10493,7 @@ pub enum ModifyReplicationGroupShardConfigurationError { InvalidReplicationGroupStateFault(String), ///

The VPC network is in an invalid state.

InvalidVPCNetworkStateFault(String), - ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 15

+ ///

The request cannot be processed because it would exceed the maximum allowed number of node groups (shards) in a single replication group. The default maximum is 90

NodeGroupsPerReplicationGroupQuotaExceededFault(String), ///

The request cannot be processed because it would exceed the allowed number of cache nodes per customer.

NodeQuotaForCustomerExceededFault(String), @@ -10456,7 +10511,7 @@ impl ModifyReplicationGroupShardConfigurationError { find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { - "InsufficientCacheClusterCapacity" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InsufficientCacheClusterCapacityFault(parsed_error.message)),"InvalidCacheClusterState" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidCacheClusterStateFault(parsed_error.message)),"InvalidParameterCombination" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidParameterCombination(parsed_error.message)),"InvalidParameterValue" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidParameterValue(parsed_error.message)),"InvalidReplicationGroupState" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidReplicationGroupStateFault(parsed_error.message)),"InvalidVPCNetworkStateFault" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidVPCNetworkStateFault(parsed_error.message)),"NodeGroupsPerReplicationGroupQuotaExceeded" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::NodeGroupsPerReplicationGroupQuotaExceededFault(parsed_error.message)),"NodeQuotaForCustomerExceeded" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::NodeQuotaForCustomerExceededFault(parsed_error.message)),"ReplicationGroupNotFoundFault" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::ReplicationGroupNotFoundFault(parsed_error.message)),_ => {} + "InsufficientCacheClusterCapacity" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InsufficientCacheClusterCapacityFault(parsed_error.message)),"InvalidCacheClusterState" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidCacheClusterStateFault(parsed_error.message)),"InvalidKMSKeyFault" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidKMSKeyFault(parsed_error.message)),"InvalidParameterCombination" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidParameterCombination(parsed_error.message)),"InvalidParameterValue" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidParameterValue(parsed_error.message)),"InvalidReplicationGroupState" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidReplicationGroupStateFault(parsed_error.message)),"InvalidVPCNetworkStateFault" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::InvalidVPCNetworkStateFault(parsed_error.message)),"NodeGroupsPerReplicationGroupQuotaExceeded" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::NodeGroupsPerReplicationGroupQuotaExceededFault(parsed_error.message)),"NodeQuotaForCustomerExceeded" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::NodeQuotaForCustomerExceededFault(parsed_error.message)),"ReplicationGroupNotFoundFault" => return RusotoError::Service(ModifyReplicationGroupShardConfigurationError::ReplicationGroupNotFoundFault(parsed_error.message)),_ => {} } } } @@ -10481,6 +10536,7 @@ impl Error for ModifyReplicationGroupShardConfigurationError { match *self { 
ModifyReplicationGroupShardConfigurationError::InsufficientCacheClusterCapacityFault(ref cause) => cause, ModifyReplicationGroupShardConfigurationError::InvalidCacheClusterStateFault(ref cause) => cause, +ModifyReplicationGroupShardConfigurationError::InvalidKMSKeyFault(ref cause) => cause, ModifyReplicationGroupShardConfigurationError::InvalidParameterCombination(ref cause) => cause, ModifyReplicationGroupShardConfigurationError::InvalidParameterValue(ref cause) => cause, ModifyReplicationGroupShardConfigurationError::InvalidReplicationGroupStateFault(ref cause) => cause, @@ -10868,6 +10924,8 @@ pub enum TestFailoverError { APICallRateForCustomerExceededFault(String), ///

The requested cluster is not in the available state.

InvalidCacheClusterStateFault(String), + ///

The KMS key supplied is not valid.

+ InvalidKMSKeyFault(String), ///

Two or more incompatible parameters were specified.

InvalidParameterCombination(String), ///

The value for a parameter is invalid.

@@ -10902,6 +10960,11 @@ impl TestFailoverError { TestFailoverError::InvalidCacheClusterStateFault(parsed_error.message), ) } + "InvalidKMSKeyFault" => { + return RusotoError::Service(TestFailoverError::InvalidKMSKeyFault( + parsed_error.message, + )) + } "InvalidParameterCombination" => { return RusotoError::Service( TestFailoverError::InvalidParameterCombination(parsed_error.message), @@ -10959,6 +11022,7 @@ impl Error for TestFailoverError { match *self { TestFailoverError::APICallRateForCustomerExceededFault(ref cause) => cause, TestFailoverError::InvalidCacheClusterStateFault(ref cause) => cause, + TestFailoverError::InvalidKMSKeyFault(ref cause) => cause, TestFailoverError::InvalidParameterCombination(ref cause) => cause, TestFailoverError::InvalidParameterValue(ref cause) => cause, TestFailoverError::InvalidReplicationGroupStateFault(ref cause) => cause, @@ -10985,7 +11049,7 @@ pub trait ElastiCache { AuthorizeCacheSecurityGroupIngressError, >; - ///

Apply the service update. For more information on service updates and applying them, see Applying Service Updates.

+ ///

Apply the service update. For more information on service updates and applying them, see Applying Service Updates.

fn batch_apply_update_action( &self, input: BatchApplyUpdateActionMessage, @@ -10997,7 +11061,7 @@ pub trait ElastiCache { input: BatchStopUpdateActionMessage, ) -> RusotoFuture; - ///

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

+ ///

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
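
To make the call shape concrete, here is a minimal sketch of invoking this operation through the generated 0.41 client that this diff produces; the region, snapshot names, and bucket name are placeholder assumptions, and credentials are sourced through the default provider chain:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{CopySnapshotMessage, ElastiCache, ElastiCacheClient};

fn main() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let request = CopySnapshotMessage {
        source_snapshot_name: "my-snapshot".to_string(),
        target_snapshot_name: "my-snapshot-copy".to_string(),
        // Only set when exporting: the bucket must live in the same region,
        // and ElastiCache needs the READ/WRITE/READ_ACP grants listed above.
        target_bucket: Some("my-export-bucket".to_string()),
        ..Default::default()
    };
    match client.copy_snapshot(request).sync() {
        Ok(output) => println!("copied: {:?}", output.snapshot),
        Err(e) => eprintln!("CopySnapshot failed: {}", e),
    }
}
```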

     fn copy_snapshot(
         &self,
         input: CopySnapshotMessage,
@@ -11027,7 +11091,7 @@ pub trait ElastiCache {
         input: CreateCacheSubnetGroupMessage,
     ) -> RusotoFuture<CreateCacheSubnetGroupResult, CreateCacheSubnetGroupError>;
 
-    ///

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

+ ///

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.
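
As a rough sketch of a cluster-mode-enabled request against the generated 0.41 API (the group identifier, node type, and region are made up for illustration):

```rust
use rusoto_core::Region;
use rusoto_elasticache::{CreateReplicationGroupMessage, ElastiCache, ElastiCacheClient};

fn main() {
    let client = ElastiCacheClient::new(Region::UsWest2);
    // Cluster mode enabled: partition data across three node groups (shards),
    // each shard holding one read/write primary and two read replicas.
    let request = CreateReplicationGroupMessage {
        replication_group_id: "my-redis-cluster".to_string(),
        replication_group_description: "cluster mode enabled example".to_string(),
        engine: Some("redis".to_string()),
        cache_node_type: Some("cache.r5.large".to_string()),
        num_node_groups: Some(3),
        replicas_per_node_group: Some(2),
        ..Default::default()
    };
    match client.create_replication_group(request).sync() {
        Ok(out) => println!("{:?}", out.replication_group),
        Err(e) => eprintln!("CreateReplicationGroup failed: {}", e),
    }
}
```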

     fn create_replication_group(
         &self,
         input: CreateReplicationGroupMessage,
@@ -11171,7 +11235,7 @@ pub trait ElastiCache {
         input: IncreaseReplicaCountMessage,
     ) -> RusotoFuture<IncreaseReplicaCountResult, IncreaseReplicaCountError>;
 
-    ///

Lists all available node types that you can scale your Redis cluster's or replication group's current node type up to.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale up your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

+ ///

Lists all available node types to which you can scale your Redis cluster's or replication group's current node type.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.
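
A minimal sketch of the call, assuming a replication group named my-redis-cluster already exists:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{
    ElastiCache, ElastiCacheClient, ListAllowedNodeTypeModificationsMessage,
};

fn main() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let request = ListAllowedNodeTypeModificationsMessage {
        replication_group_id: Some("my-redis-cluster".to_string()),
        ..Default::default()
    };
    match client.list_allowed_node_type_modifications(request).sync() {
        // Any CacheNodeType later passed to ModifyCacheCluster or
        // ModifyReplicationGroup must be one of the node types listed here.
        Ok(allowed) => println!("allowed targets: {:?}", allowed.scale_up_modifications),
        Err(e) => eprintln!("ListAllowedNodeTypeModifications failed: {}", e),
    }
}
```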

     fn list_allowed_node_type_modifications(
         &self,
         input: ListAllowedNodeTypeModificationsMessage,
@@ -11267,10 +11331,7 @@ impl ElastiCacheClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> ElastiCacheClient {
-        ElastiCacheClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -11284,10 +11345,14 @@ impl ElastiCacheClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        ElastiCacheClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> ElastiCacheClient {
+        ElastiCacheClient { client, region }
     }
 }
 
@@ -11324,7 +11389,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11377,7 +11442,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11396,7 +11461,7 @@ impl ElastiCache for ElastiCacheClient {
         })
     }
 
-    ///

Apply the service update. For more information on service updates and applying them, see Applying Service Updates.

+ ///

Apply the service update. For more information on service updates and applying them, see Applying Service Updates.
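
A hedged sketch of applying a service update to one replication group; the update name would normally come from a DescribeServiceUpdates call and is invented here:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{BatchApplyUpdateActionMessage, ElastiCache, ElastiCacheClient};

fn main() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let request = BatchApplyUpdateActionMessage {
        replication_group_ids: vec!["my-redis-cluster".to_string()],
        // Hypothetical service update name for illustration only.
        service_update_name: "elasticache-20190901-001".to_string(),
    };
    match client.batch_apply_update_action(request).sync() {
        Ok(result) => println!(
            "processed: {:?}, unprocessed: {:?}",
            result.processed_update_actions, result.unprocessed_update_actions
        ),
        Err(e) => eprintln!("BatchApplyUpdateAction failed: {}", e),
    }
}
```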

     fn batch_apply_update_action(
         &self,
         input: BatchApplyUpdateActionMessage,
@@ -11425,7 +11490,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11473,7 +11538,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11492,7 +11557,7 @@ impl ElastiCache for ElastiCacheClient {
         })
     }
 
-    ///

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to them. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

+ ///

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to them. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
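
Since the error list above surfaces to callers as variants of the generated CopySnapshotError enum, a caller might match on one specific service fault like this (a sketch; the snapshot names are placeholders):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_elasticache::{CopySnapshotError, CopySnapshotMessage, ElastiCache, ElastiCacheClient};

fn main() {
    let client = ElastiCacheClient::new(Region::UsEast1);
    let request = CopySnapshotMessage {
        source_snapshot_name: "my-snapshot".to_string(),
        target_snapshot_name: "my-snapshot-copy".to_string(),
        ..Default::default()
    };
    match client.copy_snapshot(request).sync() {
        Ok(output) => println!("copied: {:?}", output.snapshot),
        // The TargetSnapshotName was not unique; pick a new value, as the
        // "already contains an object with key" solution above advises.
        Err(RusotoError::Service(CopySnapshotError::SnapshotAlreadyExistsFault(cause))) => {
            eprintln!("target name already in use: {}", cause)
        }
        Err(e) => eprintln!("CopySnapshot failed: {}", e),
    }
}
```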

     fn copy_snapshot(
         &self,
         input: CopySnapshotMessage,
@@ -11524,7 +11589,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11575,7 +11640,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11623,7 +11688,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11671,7 +11736,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11719,7 +11784,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11738,7 +11803,7 @@ impl ElastiCache for ElastiCacheClient {
         })
     }
 
-    ///

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

+ ///

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.
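
For contrast with the cluster-mode-enabled sketch shown earlier, a cluster-mode-disabled group (a single node group with one primary plus read replicas) might be requested like this; identifiers and the node type are again placeholders:

```rust
use rusoto_core::Region;
use rusoto_elasticache::{CreateReplicationGroupMessage, ElastiCache, ElastiCacheClient};

fn main() {
    let client = ElastiCacheClient::new(Region::UsWest2);
    // Cluster mode disabled: one primary and two read replicas, with
    // automatic failover to a replica if the primary is lost.
    let request = CreateReplicationGroupMessage {
        replication_group_id: "my-redis-group".to_string(),
        replication_group_description: "cluster mode disabled example".to_string(),
        engine: Some("redis".to_string()),
        cache_node_type: Some("cache.m5.large".to_string()),
        num_cache_clusters: Some(3),
        automatic_failover_enabled: Some(true),
        ..Default::default()
    };
    match client.create_replication_group(request).sync() {
        Ok(out) => println!("{:?}", out.replication_group),
        Err(e) => eprintln!("CreateReplicationGroup failed: {}", e),
    }
}
```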

     fn create_replication_group(
         &self,
         input: CreateReplicationGroupMessage,
@@ -11767,7 +11832,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11818,7 +11883,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11868,7 +11933,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11919,7 +11984,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12042,7 +12107,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12093,7 +12158,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12141,7 +12206,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12189,7 +12254,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12237,7 +12302,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12285,7 +12350,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12333,7 +12398,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12381,7 +12446,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12432,7 +12497,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12483,7 +12548,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12529,7 +12594,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12577,7 +12642,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12628,7 +12693,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12676,7 +12741,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12727,7 +12792,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12775,7 +12840,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12825,7 +12890,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12844,7 +12909,7 @@ impl ElastiCache for ElastiCacheClient {
         })
     }
 
-    ///

Lists all available node types that you can scale your Redis cluster's or replication group's current node type up to.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale up your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

+ ///

Lists all available node types to which you can scale your Redis cluster's or replication group's current node type.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.
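
A sketch of chaining the two calls this doc comment describes, picking one of the returned node types and feeding it back as CacheNodeType; it assumes the group name is a placeholder and that RusotoError implements std::error::Error so `?` can box it:

```rust
use rusoto_elasticache::{
    ElastiCache, ElastiCacheClient, ListAllowedNodeTypeModificationsMessage,
    ModifyReplicationGroupMessage,
};

fn scale_to_allowed_type(
    client: &ElastiCacheClient,
    group: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let allowed = client
        .list_allowed_node_type_modifications(ListAllowedNodeTypeModificationsMessage {
            replication_group_id: Some(group.to_string()),
            ..Default::default()
        })
        .sync()?;
    // CacheNodeType must be one of the node types returned above.
    if let Some(target) = allowed.scale_up_modifications.and_then(|mut v| v.pop()) {
        client
            .modify_replication_group(ModifyReplicationGroupMessage {
                replication_group_id: group.to_string(),
                cache_node_type: Some(target),
                apply_immediately: Some(true),
                ..Default::default()
            })
            .sync()?;
    }
    Ok(())
}
```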

     fn list_allowed_node_type_modifications(
         &self,
         input: ListAllowedNodeTypeModificationsMessage,
@@ -12876,7 +12941,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12926,7 +12991,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -12977,7 +13042,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13025,7 +13090,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13073,7 +13138,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13121,7 +13186,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13176,7 +13241,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13230,7 +13295,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13281,7 +13346,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13329,7 +13394,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13377,7 +13442,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13428,7 +13493,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -13479,7 +13544,7 @@ impl ElastiCache for ElastiCacheClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
diff --git a/rusoto/services/elasticbeanstalk/Cargo.toml b/rusoto/services/elasticbeanstalk/Cargo.toml
index fee41d3c69d..f49c4aaac06 100644
--- a/rusoto/services/elasticbeanstalk/Cargo.toml
+++ b/rusoto/services/elasticbeanstalk/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_elasticbeanstalk"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -17,18 +17,20 @@ exclude = ["test_resources/*"]
 [dependencies]
 bytes = "0.4.12"
 futures = "0.1.16"
-serde_urlencoded = "0.5"
-xml-rs = "0.7"
+serde_urlencoded = "0.6"
+xml-rs = "0.8"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/elasticbeanstalk/README.md b/rusoto/services/elasticbeanstalk/README.md
index 50314ed1368..eec1b50a19b 100644
--- a/rusoto/services/elasticbeanstalk/README.md
+++ b/rusoto/services/elasticbeanstalk/README.md
@@ -23,9 +23,16 @@ To use `rusoto_elasticbeanstalk` in your application, add it as a dependency in
 
 ```toml
 [dependencies]
-rusoto_elasticbeanstalk = "0.40.0"
+rusoto_elasticbeanstalk = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/elasticbeanstalk/src/custom/mod.rs b/rusoto/services/elasticbeanstalk/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/elasticbeanstalk/src/custom/mod.rs
+++ b/rusoto/services/elasticbeanstalk/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/elasticbeanstalk/src/generated.rs b/rusoto/services/elasticbeanstalk/src/generated.rs
index 86ddd35192d..87f785d7f65 100644
--- a/rusoto/services/elasticbeanstalk/src/generated.rs
+++ b/rusoto/services/elasticbeanstalk/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
@@ -9919,10 +9918,7 @@ impl ElasticBeanstalkClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> ElasticBeanstalkClient {
-        ElasticBeanstalkClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -9936,10 +9932,14 @@ impl ElasticBeanstalkClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        ElasticBeanstalkClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> ElasticBeanstalkClient {
+        ElasticBeanstalkClient { client, region }
     }
 }
 
@@ -9998,7 +9998,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10048,7 +10048,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10098,7 +10098,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10149,7 +10149,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10197,7 +10197,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10245,7 +10245,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10296,7 +10296,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10344,7 +10344,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10391,7 +10391,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10542,7 +10542,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10589,7 +10589,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10637,7 +10637,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10687,7 +10687,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10735,7 +10735,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10783,7 +10783,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10831,7 +10831,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10888,7 +10888,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10940,7 +10940,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -10989,7 +10989,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11039,7 +11039,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11090,7 +11090,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11138,7 +11138,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11186,7 +11186,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11234,7 +11234,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11284,7 +11284,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11334,7 +11334,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11463,7 +11463,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11538,7 +11538,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11589,7 +11589,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11642,7 +11642,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11691,7 +11691,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11739,7 +11739,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11790,7 +11790,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -11864,7 +11864,7 @@ impl ElasticBeanstalk for ElasticBeanstalkClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
diff --git a/rusoto/services/elastictranscoder/Cargo.toml b/rusoto/services/elastictranscoder/Cargo.toml
index ad7ba9f4cd0..28a428ba478 100644
--- a/rusoto/services/elastictranscoder/Cargo.toml
+++ b/rusoto/services/elastictranscoder/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_elastictranscoder"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/elastictranscoder/README.md b/rusoto/services/elastictranscoder/README.md
index bd9dea3073b..5364450fffd 100644
--- a/rusoto/services/elastictranscoder/README.md
+++ b/rusoto/services/elastictranscoder/README.md
@@ -23,9 +23,16 @@ To use `rusoto_elastictranscoder` in your application, add it as a dependency in
 
 ```toml
 [dependencies]
-rusoto_elastictranscoder = "0.40.0"
+rusoto_elastictranscoder = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/elastictranscoder/src/custom/mod.rs b/rusoto/services/elastictranscoder/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/elastictranscoder/src/custom/mod.rs
+++ b/rusoto/services/elastictranscoder/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/elastictranscoder/src/generated.rs b/rusoto/services/elastictranscoder/src/generated.rs
index aacf3762466..53101bfb692 100644
--- a/rusoto/services/elastictranscoder/src/generated.rs
+++ b/rusoto/services/elastictranscoder/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -118,7 +117,7 @@ pub struct CancelJobRequest {
 ///

The response body contains a JSON object. If the job is successfully canceled, the value of Success is true.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CancelJobResponse {}
 ///

The file format of the output captions. If you leave this value blank, Elastic Transcoder returns an error.

@@ -287,7 +286,7 @@ pub struct CreateJobRequest {
 ///

The CreateJobResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateJobResponse {
     ///

A section of the response body that provides information about the job that is created.

#[serde(rename = "Job")] @@ -331,7 +330,7 @@ pub struct CreatePipelineRequest { ///

When you create a pipeline, Elastic Transcoder returns the values that you specified in the request.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreatePipelineResponse {
     ///

A section of the response body that provides information about the pipeline that is created.

#[serde(rename = "Pipeline")] @@ -372,7 +371,7 @@ pub struct CreatePresetRequest { ///

The CreatePresetResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreatePresetResponse {
     ///

A section of the response body that provides information about the preset that is created.

#[serde(rename = "Preset")] @@ -394,7 +393,7 @@ pub struct DeletePipelineRequest { ///

The DeletePipelineResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeletePipelineResponse {}
 ///

The DeletePresetRequest structure.

@@ -407,7 +406,7 @@ pub struct DeletePresetRequest {
 ///

The DeletePresetResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeletePresetResponse {}
 ///

The detected properties of the input file. Elastic Transcoder identifies these values from the input file.

@@ -500,7 +499,7 @@ pub struct InputCaptions {
 ///

A section of the response body that provides information about the job that is created.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Job {
     ///

The Amazon Resource Name (ARN) for the job.

#[serde(rename = "Arn")] @@ -612,7 +611,7 @@ pub struct JobInput { ///

Outputs recommended instead.

If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct JobOutput {
     ///

The album art to be associated with the output file, if any.

#[serde(rename = "AlbumArt")] @@ -731,7 +730,7 @@ pub struct ListJobsByPipelineRequest { ///

The ListJobsByPipelineResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListJobsByPipelineResponse {
     ///

An array of Job objects that are in the specified pipeline.

#[serde(rename = "Jobs")] @@ -761,7 +760,7 @@ pub struct ListJobsByStatusRequest { ///

The ListJobsByStatusResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListJobsByStatusResponse {
     ///

An array of Job objects that have the specified status.

#[serde(rename = "Jobs")] @@ -788,7 +787,7 @@ pub struct ListPipelinesRequest { ///

A list of the pipelines associated with the current AWS account.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListPipelinesResponse {
     ///

A value that you use to access the second and subsequent pages of results, if any. When the pipelines fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

#[serde(rename = "NextPageToken")] @@ -815,7 +814,7 @@ pub struct ListPresetsRequest { ///

The ListPresetsResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListPresetsResponse {
     ///

A value that you use to access the second and subsequent pages of results, if any. When the presets fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

#[serde(rename = "NextPageToken")] @@ -867,7 +866,7 @@ pub struct Permission { ///

The pipeline (queue) that is used to manage jobs.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Pipeline {
     ///

The Amazon Resource Name (ARN) for the pipeline.

#[serde(rename = "Arn")] @@ -963,7 +962,7 @@ pub struct PlayReadyDrm { ///

Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. We recommend that you create only one master playlist per output format. The maximum number of master playlists in a job is 30.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Playlist {
     ///

The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

#[serde(rename = "Format")] @@ -997,7 +996,7 @@ pub struct Playlist { ///

Presets are templates that contain most of the settings for transcoding media files from one format to another. Elastic Transcoder includes some default presets for common formats, for example, several iPod and iPhone versions. You can also create your own presets for formats that aren't included among the default presets. You specify which preset you want to use when you create a job.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Preset {
     ///

The Amazon Resource Name (ARN) for the preset.

#[serde(rename = "Arn")] @@ -1092,7 +1091,7 @@ pub struct ReadJobRequest { ///

The ReadJobResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ReadJobResponse {
     ///

A section of the response body that provides information about the job.

#[serde(rename = "Job")] @@ -1110,7 +1109,7 @@ pub struct ReadPipelineRequest { ///

The ReadPipelineResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ReadPipelineResponse {
     ///

A section of the response body that provides information about the pipeline.

#[serde(rename = "Pipeline")] @@ -1132,7 +1131,7 @@ pub struct ReadPresetRequest { ///

The ReadPresetResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ReadPresetResponse {
     ///

A section of the response body that provides information about the preset.

#[serde(rename = "Preset")] @@ -1159,7 +1158,7 @@ pub struct TestRoleRequest { ///

The TestRoleResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TestRoleResponse {
     ///

If the Success element contains false, this value is an array of one or more error messages that were generated during the test process.

#[serde(rename = "Messages")] @@ -1223,7 +1222,7 @@ pub struct TimeSpan { ///

Details about the timing of a job.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Timing {
     ///

The time the job finished transcoding, in epoch milliseconds.

#[serde(rename = "FinishTimeMillis")] @@ -1252,7 +1251,7 @@ pub struct UpdatePipelineNotificationsRequest { ///

The UpdatePipelineNotificationsResponse structure.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdatePipelineNotificationsResponse {
     ///

A section of the response body that provides information about the pipeline associated with this notification.

#[serde(rename = "Pipeline")] @@ -1298,7 +1297,7 @@ pub struct UpdatePipelineRequest { ///

When you update a pipeline, Elastic Transcoder returns the values that you specified in the request.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdatePipelineResponse {
     ///

The pipeline updated by this UpdatePipelineResponse call.

#[serde(rename = "Pipeline")] @@ -1323,7 +1322,7 @@ pub struct UpdatePipelineStatusRequest { ///

When you update status for a pipeline, Elastic Transcoder returns the values that you specified in the request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePipelineStatusResponse { ///

A section of the response body that provides information about the pipeline.

#[serde(rename = "Pipeline")] @@ -1398,7 +1397,7 @@ pub struct VideoParameters { ///

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Warning { ///

The code of the cross-regional warning.

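For context on what the `serialize_structs` feature gate in the hunks above buys: when a service crate is built with that feature, output structs such as `ReadJobResponse` also derive `Serialize`, so they can be fed back through serde. A minimal sketch, assuming `rusoto_elastictranscoder` 0.41 built with the feature and a caller-supplied `serde_json` dependency (both assumptions, not part of this diff):

```rust
// Assumes in the caller's Cargo.toml:
//   rusoto_elastictranscoder = { version = "0.41.0", features = ["serialize_structs"] }
//   serde_json = "1"
use rusoto_elastictranscoder::ReadJobResponse;

// With `serialize_structs` enabled the response type derives `Serialize`,
// so it can be dumped as JSON, e.g. for logging or caching. Without the
// feature, the struct only derives `Deserialize` and this will not compile.
fn dump_response(resp: &ReadJobResponse) -> serde_json::Result<String> {
    serde_json::to_string_pretty(resp)
}
```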
#[serde(rename = "Code")] @@ -2439,10 +2438,7 @@ impl EtsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> EtsClient { - EtsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2456,10 +2452,14 @@ impl EtsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - EtsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EtsClient { + EtsClient { client, region } } } diff --git a/rusoto/services/elb/Cargo.toml b/rusoto/services/elb/Cargo.toml index 5b7fb803b96..b1ab66799b6 100644 --- a/rusoto/services/elb/Cargo.toml +++ b/rusoto/services/elb/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_elb" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/elb/README.md b/rusoto/services/elb/README.md index eb320d9cb61..6269d918918 100644 --- a/rusoto/services/elb/README.md +++ b/rusoto/services/elb/README.md @@ -23,9 +23,16 @@ To use `rusoto_elb` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_elb = "0.40.0" +rusoto_elb = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/elb/src/custom/mod.rs b/rusoto/services/elb/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/elb/src/custom/mod.rs +++ b/rusoto/services/elb/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/elb/src/generated.rs b/rusoto/services/elb/src/generated.rs index fdbec97e7be..3db23708a53 100644 --- a/rusoto/services/elb/src/generated.rs +++ b/rusoto/services/elb/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
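The `new_with_client` constructor added above makes the shared-client path explicit; `new` and `new_with` now both funnel through it. A minimal sketch of how a caller might reuse one `rusoto_core::Client` across two service clients (the region choice here is arbitrary, and this assumes `rusoto_core::Client` is `Clone`, which it is in this release line):

```rust
use rusoto_core::{Client, Region};
use rusoto_elastictranscoder::EtsClient;
use rusoto_elb::ElbClient;

// One shared HTTP dispatcher and credentials stack backing two service
// clients, instead of each constructor building its own.
fn build_clients() -> (EtsClient, ElbClient) {
    let shared = Client::shared();
    (
        EtsClient::new_with_client(shared.clone(), Region::UsEast1),
        ElbClient::new_with_client(shared, Region::UsEast1),
    )
}
```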
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -6153,10 +6152,7 @@ impl ElbClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ElbClient { - ElbClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -6170,10 +6166,14 @@ impl ElbClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ElbClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ElbClient { + ElbClient { client, region } } } @@ -6207,7 +6207,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6255,7 +6255,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6303,7 +6303,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6353,7 +6353,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6404,7 +6404,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6453,7 +6453,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6504,7 +6504,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6552,7 +6552,7 @@ impl Elb for ElbClient { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6600,7 +6600,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6651,7 +6651,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6699,7 +6699,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6747,7 +6747,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6797,7 +6797,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6845,7 +6845,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6893,7 +6893,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6942,7 +6942,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6990,7 +6990,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7041,7 +7041,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7089,7 +7089,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7140,7 +7140,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7188,7 +7188,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7239,7 +7239,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7289,7 +7289,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7337,7 +7337,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7387,7 +7387,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7438,7 +7438,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7489,7 +7489,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7542,7 +7542,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -7594,7 +7594,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/elbv2/Cargo.toml b/rusoto/services/elbv2/Cargo.toml index aec970e7082..254fae92f26 100644 --- a/rusoto/services/elbv2/Cargo.toml +++ 
b/rusoto/services/elbv2/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_elbv2" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/elbv2/README.md b/rusoto/services/elbv2/README.md index abc3b41d085..6500297d897 100644 --- a/rusoto/services/elbv2/README.md +++ b/rusoto/services/elbv2/README.md @@ -23,9 +23,16 @@ To use `rusoto_elbv2` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_elbv2 = "0.40.0" +rusoto_elbv2 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/elbv2/src/custom/mod.rs b/rusoto/services/elbv2/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/elbv2/src/custom/mod.rs +++ b/rusoto/services/elbv2/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/elbv2/src/generated.rs b/rusoto/services/elbv2/src/generated.rs index f8c293fa89b..275abb22119 100644 --- a/rusoto/services/elbv2/src/generated.rs +++ b/rusoto/services/elbv2/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -231,7 +230,7 @@ impl AddListenerCertificatesInputSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct AddListenerCertificatesOutput { - ///

Information about the certificates.

+ ///

Information about the certificates in the certificate list.

pub certificates: Option>, } @@ -967,9 +966,9 @@ impl AuthenticateOidcActionUserInfoEndpointDeserializer { ///

Information about an Availability Zone.

#[derive(Default, Debug, Clone, PartialEq)] pub struct AvailabilityZone { - ///

[Network Load Balancers] The static IP address.

+ ///

[Network Load Balancers] If you need static IP addresses for your load balancer, you can specify one Elastic IP address per Availability Zone when you create the load balancer.

pub load_balancer_addresses: Option>, - ///

The ID of the subnet.

+ ///

The ID of the subnet. You can specify one subnet per Availability Zone.

pub subnet_id: Option, ///

The name of the Availability Zone.

pub zone_name: Option, @@ -1037,7 +1036,7 @@ impl CanonicalHostedZoneIdDeserializer { pub struct Certificate { ///

The Amazon Resource Name (ARN) of the certificate.

pub certificate_arn: Option, - ///

Indicates whether the certificate is the default certificate. Do not set IsDefault when specifying a certificate as an input parameter.

+ ///

Indicates whether the certificate is the default certificate. Do not set this value when specifying a certificate as an input. This value is not included in the output when describing a listener, but is included when describing listener certificates.

pub is_default: Option, } @@ -1204,15 +1203,15 @@ impl ConditionFieldNameDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateListenerInput { - ///

[HTTPS and TLS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

+ ///

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list for the listener, use AddListenerCertificates.

pub certificates: Option>, - ///

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

+ ///

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

pub default_actions: Vec, ///

The Amazon Resource Name (ARN) of the load balancer.

pub load_balancer_arn: String, ///

The port on which the load balancer is listening.

pub port: i64, - ///

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP and TLS.

+ ///

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP.

pub protocol: String, ///

[HTTPS and TLS listeners] The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.

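As a concrete illustration of the listener fields documented above, here is a sketch of a `CreateListenerInput` for a Network Load Balancer TCP listener with a single forward action. The ARNs are placeholders, not values from this diff:

```rust
use rusoto_elbv2::{Action, CreateListenerInput};

// A TCP listener on port 80 forwarding to one target group. For an NLB the
// protocol may be TCP, TLS, UDP, or TCP_UDP per the updated documentation.
fn tcp_listener_input(lb_arn: &str, tg_arn: &str) -> CreateListenerInput {
    CreateListenerInput {
        default_actions: vec![Action {
            type_: "forward".to_string(),
            target_group_arn: Some(tg_arn.to_string()),
            ..Default::default()
        }],
        load_balancer_arn: lb_arn.to_string(),
        port: 80,
        protocol: "TCP".to_string(),
        ..Default::default()
    }
}
```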
pub ssl_policy: Option, @@ -1287,7 +1286,7 @@ pub struct CreateLoadBalancerInput { pub scheme: Option, ///

[Application Load Balancers] The IDs of the security groups for the load balancer.

pub security_groups: Option>, - ///

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet.

+ ///

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your load balancer.

pub subnet_mappings: Option>, ///

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

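To make the subnet-mapping note above concrete, a sketch of a `CreateLoadBalancerInput` that pins one Elastic IP allocation per subnet for a Network Load Balancer. The name, subnet ID, and allocation ID are placeholders:

```rust
use rusoto_elbv2::{CreateLoadBalancerInput, SubnetMapping};

// One subnet mapping per Availability Zone; the optional allocation_id
// attaches a pre-allocated Elastic IP, giving the NLB a static address.
fn nlb_with_static_ip() -> CreateLoadBalancerInput {
    CreateLoadBalancerInput {
        name: "edge-nlb".to_string(),
        type_: Some("network".to_string()),
        subnet_mappings: Some(vec![SubnetMapping {
            subnet_id: Some("subnet-0123456789abcdef0".to_string()),
            allocation_id: Some("eipalloc-0123456789abcdef0".to_string()),
        }]),
        ..Default::default()
    }
}
```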
pub subnets: Option>, @@ -1371,9 +1370,9 @@ impl CreateLoadBalancerOutputDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateRuleInput { - ///

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

+ ///

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

pub actions: Vec, - ///

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • - .

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • _ - . $ / ~ " ' @ : +

  • & (using &amp;)

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

+ ///

The conditions. Each rule can include zero or one of the following conditions: http-request-method, host-header, path-pattern, and source-ip, and zero or more of the following conditions: http-header and query-string.

pub conditions: Vec, ///

The Amazon Resource Name (ARN) of the listener.

pub listener_arn: String, @@ -1429,19 +1428,19 @@ impl CreateRuleOutputDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateTargetGroupInput { - ///

Indicates whether health checks are enabled. If the target type is instance or ip, the default is true. If the target type is lambda, the default is false.

+ ///

Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance or ip, health checks are always enabled and cannot be disabled.

pub health_check_enabled: Option, - ///

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5–300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds. If the target type is instance or ip, the default is 30 seconds. If the target type is lambda, the default is 35 seconds.

+ ///

The approximate amount of time, in seconds, between health checks of an individual target. For HTTP and HTTPS health checks, the range is 5–300 seconds. For TCP health checks, the supported values are 10 and 30 seconds. If the target type is instance or ip, the default is 30 seconds. If the target type is lambda, the default is 35 seconds.

pub health_check_interval_seconds: Option, ///

[HTTP/HTTPS health checks] The ping path that is the destination on the targets for health checks. The default is /.

pub health_check_path: Option, ///

The port the load balancer uses when performing health checks on targets. The default is traffic-port, which is the port on which each target receives traffic from the load balancer.

pub health_check_port: Option, - ///

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP. The TCP protocol is supported for health checks only if the protocol of the target group is TCP or TLS. The TLS protocol is not supported for health checks.

+ ///

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The TLS, UDP, and TCP_UDP protocols are not supported for health checks.

pub health_check_protocol: Option, - ///

The amount of time, in seconds, during which no response from a target means a failed health check. For Application Load Balancers, the range is 2–120 seconds and the default is 5 seconds if the target type is instance or ip and 30 seconds if the target type is lambda. For Network Load Balancers, this is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.

+ ///

The amount of time, in seconds, during which no response from a target means a failed health check. For target groups with a protocol of HTTP or HTTPS, the default is 5 seconds. For target groups with a protocol of TCP or TLS, this value must be 6 seconds for HTTP health checks and 10 seconds for TCP and HTTPS health checks. If the target type is lambda, the default is 30 seconds.

pub health_check_timeout_seconds: Option, - ///

The number of consecutive health checks successes required before considering an unhealthy target healthy. For Application Load Balancers, the default is 5. For Network Load Balancers, the default is 3.

+ ///

The number of consecutive successful health checks required before considering an unhealthy target healthy. For target groups with a protocol of HTTP or HTTPS, the default is 5. For target groups with a protocol of TCP or TLS, the default is 3. If the target type is lambda, the default is 5.

pub healthy_threshold_count: Option, ///

[HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target.

pub matcher: Option, @@ -1449,13 +1448,13 @@ pub struct CreateTargetGroupInput { pub name: String, ///

The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. If the target is a Lambda function, this parameter does not apply.

pub port: Option, - ///

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP and TLS. If the target is a Lambda function, this parameter does not apply.

+ ///

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, or TCP_UDP. A TCP_UDP listener must be associated with a TCP_UDP target group. If the target is a Lambda function, this parameter does not apply.

pub protocol: Option, - ///

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Targets are specified by instance ID. This is the default value.

  • ip - Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - The target groups contains a single Lambda function.

+ ///

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Targets are specified by instance ID. This is the default value. If the target group protocol is UDP or TCP_UDP, the target type must be instance.

  • ip - Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - The target group contains a single Lambda function.

pub target_type: Option, - ///

The number of consecutive health check failures required before considering a target unhealthy. For Application Load Balancers, the default is 2. For Network Load Balancers, this value must be the same as the healthy threshold count.

+ ///

The number of consecutive health check failures required before considering a target unhealthy. For target groups with a protocol of HTTP or HTTPS, the default is 2. For target groups with a protocol of TCP or TLS, this value must be the same as the healthy threshold count. If the target type is lambda, the default is 2.

pub unhealthy_threshold_count: Option, - ///

The identifier of the virtual private cloud (VPC). If the target is a Lambda function, this parameter does not apply.

+ ///

The identifier of the virtual private cloud (VPC). If the target is a Lambda function, this parameter does not apply. Otherwise, this parameter is required.

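The target-group constraints spelled out above compose like this; a sketch of a `CreateTargetGroupInput` for a UDP target group, which per the new documentation must use the `instance` target type. The name, port, and VPC ID are placeholders:

```rust
use rusoto_elbv2::CreateTargetGroupInput;

// A UDP target group: instance targets only, TCP health checks, and a
// health-check interval of 10 or 30 seconds per the notes above.
fn udp_target_group(vpc_id: &str) -> CreateTargetGroupInput {
    CreateTargetGroupInput {
        name: "game-servers".to_string(),
        protocol: Some("UDP".to_string()),
        port: Some(7777),
        vpc_id: Some(vpc_id.to_string()),
        target_type: Some("instance".to_string()),
        health_check_protocol: Some("TCP".to_string()),
        health_check_interval_seconds: Some(10),
        ..Default::default()
    }
}
```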
pub vpc_id: Option, } @@ -1825,7 +1824,7 @@ impl DescribeAccountLimitsInputSerializer { pub struct DescribeAccountLimitsOutput { ///

Information about the limits.

pub limits: Option>, - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, } @@ -1890,7 +1889,7 @@ impl DescribeListenerCertificatesInputSerializer { pub struct DescribeListenerCertificatesOutput { ///

Information about the certificates.

pub certificates: Option>, - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, } @@ -1966,7 +1965,7 @@ impl DescribeListenersInputSerializer { pub struct DescribeListenersOutput { ///

Information about the listeners.

pub listeners: Option>, - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, } @@ -2098,7 +2097,7 @@ impl DescribeLoadBalancersInputSerializer { pub struct DescribeLoadBalancersOutput { ///

Information about the load balancers.

pub load_balancers: Option>, - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, } @@ -2172,7 +2171,7 @@ impl DescribeRulesInputSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeRulesOutput { - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, ///

Information about the rules.

pub rules: Option>, @@ -2238,7 +2237,7 @@ impl DescribeSSLPoliciesInputSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeSSLPoliciesOutput { - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, ///

Information about the policies.

pub ssl_policies: Option>, @@ -2424,7 +2423,7 @@ impl DescribeTargetGroupsInputSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeTargetGroupsOutput { - ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

+ ///

If there are additional results, this is the marker for the next set of results. Otherwise, this is null.

pub next_marker: Option, ///

Information about the target groups.

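The `next_marker` wording changed across all of these Describe outputs: per the new text it is null (`None`) once the last page has been returned, which is what a pagination loop should test. A minimal sketch using the blocking `.sync()` adapter for brevity:

```rust
use rusoto_elbv2::{DescribeTargetGroupsInput, Elb, ElbClient, TargetGroup};

// Drain all pages of DescribeTargetGroups by feeding next_marker back in
// as the marker until the service stops returning one.
fn all_target_groups(client: &ElbClient) -> Vec<TargetGroup> {
    let mut out = Vec::new();
    let mut marker: Option<String> = None;
    loop {
        let resp = client
            .describe_target_groups(DescribeTargetGroupsInput {
                marker: marker.clone(),
                ..Default::default()
            })
            .sync()
            .expect("DescribeTargetGroups failed");
        out.extend(resp.target_groups.unwrap_or_default());
        match resp.next_marker {
            Some(m) => marker = Some(m),
            None => break,
        }
    }
    out
}
```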
pub target_groups: Option>, @@ -2691,8 +2690,10 @@ impl HealthCheckTimeoutSecondsDeserializer { Ok(obj) } } +///

Information about a host header condition.

#[derive(Default, Debug, Clone, PartialEq)] pub struct HostHeaderConditionConfig { + ///

One or more host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

If you specify multiple strings, the condition is satisfied if one of the strings matches the host name.

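The typed condition structs introduced in this diff pair a `Field` value with a matching config, as described above. A sketch of a host-header condition; the hostname pattern is a placeholder:

```rust
use rusoto_elbv2::{HostHeaderConditionConfig, RuleCondition};

// Field and config must agree: host_header_config is honored only when
// field is "host-header".
fn host_header_condition() -> RuleCondition {
    RuleCondition {
        field: Some("host-header".to_string()),
        host_header_config: Some(HostHeaderConditionConfig {
            values: Some(vec!["*.example.com".to_string()]),
        }),
        ..Default::default()
    }
}
```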
pub values: Option>, } @@ -2751,9 +2752,12 @@ impl HttpCodeDeserializer { Ok(obj) } } +///

Information about an HTTP header condition.

There is a set of standard HTTP header fields. You can also define custom HTTP header fields.

#[derive(Default, Debug, Clone, PartialEq)] pub struct HttpHeaderConditionConfig { + ///

The name of the HTTP header field. The maximum size is 40 characters. The header name is case insensitive. The allowed characters are specified by RFC 7230. Wildcards are not supported.

You can't use an HTTP header condition to specify the host header. Use HostHeaderConditionConfig to specify a host header condition.

pub http_header_name: Option, + ///

One or more strings to compare against the value of the HTTP header. The maximum size of each string is 128 characters. The comparison strings are case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

If the same header appears multiple times in the request, we search them in order until a match is found.

If you specify multiple strings, the condition is satisfied if one of the strings matches the value of the HTTP header. To require that all of the strings are a match, create one condition per string.

pub values: Option>, } @@ -2822,8 +2826,10 @@ impl HttpHeaderConditionNameDeserializer { Ok(obj) } } +///

Information about an HTTP method condition.

HTTP defines a set of request methods, also referred to as HTTP verbs. For more information, see the HTTP Method Registry. You can also define custom HTTP methods.

#[derive(Default, Debug, Clone, PartialEq)] pub struct HttpRequestMethodConditionConfig { + ///

The name of the request method. The maximum size is 40 characters. The allowed characters are A-Z, hyphen (-), and underscore (_). The comparison is case sensitive. Wildcards are not supported; therefore, the method name must be an exact match.

If you specify multiple strings, the condition is satisfied if one of the strings matches the HTTP request method. We recommend that you route GET and HEAD requests in the same way, because the response to a HEAD request may be cached.

pub values: Option>, } @@ -2980,7 +2986,7 @@ impl ListOfStringSerializer { ///

Information about a listener.

#[derive(Default, Debug, Clone, PartialEq)] pub struct Listener { - ///

The SSL server certificate. You must provide a certificate if the protocol is HTTPS or TLS.

+ ///

[HTTPS or TLS listener] The default certificate for the listener.

pub certificates: Option>, ///

The default actions for the listener.

pub default_actions: Option>, @@ -2992,7 +2998,7 @@ pub struct Listener { pub port: Option, ///

The protocol for connections from clients to the load balancer.

pub protocol: Option, - ///

The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.

+ ///

[HTTPS or TLS listener] The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.

pub ssl_policy: Option, } @@ -3549,15 +3555,15 @@ impl MaxDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyListenerInput { - ///

[HTTPS and TLS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

+ ///

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

pub certificates: Option>, - ///

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

+ ///

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

pub default_actions: Option>, ///

The Amazon Resource Name (ARN) of the listener.

pub listener_arn: String, ///

The port for connections from clients to the load balancer.

pub port: Option, - ///

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP and TLS protocols.

+ ///

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols.

pub protocol: Option, ///

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide.

pub ssl_policy: Option, @@ -3686,9 +3692,9 @@ impl ModifyLoadBalancerAttributesOutputDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyRuleInput { - ///

The actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

+ ///

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

pub actions: Option>, - ///

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • - .

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • _ - . $ / ~ " ' @ : +

  • & (using &amp;)

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

+ ///

The conditions. Each rule can include zero or one of the following conditions: http-request-method, host-header, path-pattern, and source-ip, and zero or more of the following conditions: http-header and query-string.

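Tying the rule-modification fields above together, a sketch of a `ModifyRuleInput` that replaces a rule's conditions with a single path-pattern condition under the new model (at most one path-pattern condition per rule). The rule ARN and pattern are placeholders:

```rust
use rusoto_elbv2::{ModifyRuleInput, PathPatternConditionConfig, RuleCondition};

// Replaces the rule's existing conditions wholesale; actions are left
// unchanged because the field is None.
fn modify_rule_input(rule_arn: &str) -> ModifyRuleInput {
    ModifyRuleInput {
        rule_arn: rule_arn.to_string(),
        conditions: Some(vec![RuleCondition {
            field: Some("path-pattern".to_string()),
            path_pattern_config: Some(PathPatternConditionConfig {
                values: Some(vec!["/img/*".to_string()]),
            }),
            ..Default::default()
        }]),
        ..Default::default()
    }
}
```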
pub conditions: Option>, ///

The Amazon Resource Name (ARN) of the rule.

pub rule_arn: String, @@ -3806,13 +3812,13 @@ impl ModifyTargetGroupAttributesOutputDeserializer { pub struct ModifyTargetGroupInput { ///

Indicates whether health checks are enabled.

pub health_check_enabled: Option, - ///

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5–300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.

If the protocol of the target group is TCP, you can't modify this setting.

+ ///

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5 to 300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.

If the protocol of the target group is TCP, you can't modify this setting.

pub health_check_interval_seconds: Option, ///

[HTTP/HTTPS health checks] The ping path that is the destination for the health check request.

pub health_check_path: Option, ///

The port the load balancer uses when performing health checks on targets.

pub health_check_port: Option, - ///

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP or TLS. The TLS protocol is not supported for health checks.

If the protocol of the target group is TCP, you can't modify this setting.

+ ///

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The TLS, UDP, and TCP_UDP protocols are not supported for health checks.

If the protocol of the target group is TCP, you can't modify this setting.

pub health_check_protocol: Option, ///

[HTTP/HTTPS health checks] The amount of time, in seconds, during which no response means a failed health check.

If the protocol of the target group is TCP, you can't modify this setting.

pub health_check_timeout_seconds: Option, @@ -3936,8 +3942,10 @@ impl PathDeserializer { Ok(obj) } } +///

Information about a path pattern condition.

#[derive(Default, Debug, Clone, PartialEq)] pub struct PathPatternConditionConfig { + ///

One or more path patterns to compare against the request URL. The maximum size of each string is 128 characters. The comparison is case sensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

If you specify multiple strings, the condition is satisfied if one of them matches the request URL. The path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use QueryStringConditionConfig.

pub values: Option>, } @@ -4007,8 +4015,10 @@ impl ProtocolEnumDeserializer { Ok(obj) } } +///

Information about a query string condition.

The query string component of a URI starts after the first '?' character and is terminated by either a '#' character or the end of the URI. A typical query string contains key/value pairs separated by '&' characters. The allowed characters are specified by RFC 3986. Any character can be percentage encoded.

#[derive(Default, Debug, Clone, PartialEq)] pub struct QueryStringConditionConfig { + ///

One or more key/value pairs or values to find in the query string. The maximum size of each string is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, you must escape these characters in Values using a '\' character.

If you specify multiple key/value pairs or values, the condition is satisfied if one of them is found in the query string.

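Query-string conditions take key/value pairs rather than bare strings, as described above. A sketch using `QueryStringKeyValuePair`; the key and value are placeholders:

```rust
use rusoto_elbv2::{QueryStringConditionConfig, QueryStringKeyValuePair, RuleCondition};

// Matches requests whose query string contains version=v2; the key could
// be omitted to match on the value alone.
fn query_string_condition() -> RuleCondition {
    RuleCondition {
        field: Some("query-string".to_string()),
        query_string_config: Some(QueryStringConditionConfig {
            values: Some(vec![QueryStringKeyValuePair {
                key: Some("version".to_string()),
                value: Some("v2".to_string()),
            }]),
        }),
        ..Default::default()
    }
}
```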
pub values: Option>, } @@ -4056,9 +4066,12 @@ impl QueryStringConditionConfigSerializer { } } +///

Information about a key/value pair.

#[derive(Default, Debug, Clone, PartialEq)] pub struct QueryStringKeyValuePair { + ///

The key. You can omit the key.

pub key: Option, + ///

The value.

pub value: Option, } @@ -4451,9 +4464,9 @@ impl ResourceArnsSerializer { ///

Information about a rule.

#[derive(Default, Debug, Clone, PartialEq)] pub struct Rule { - ///

The actions.

+ ///

The actions. Each rule must include exactly one of the following types of actions: forward, redirect, or fixed-response, and it must be the last action to be performed.

pub actions: Option>, - ///

The conditions.

+ ///

The conditions. Each rule can include zero or one of the following conditions: http-request-method, host-header, path-pattern, and source-ip, and zero or more of the following conditions: http-header and query-string.

pub conditions: Option>, ///

Indicates whether this is the default rule.

pub is_default: Option, @@ -4520,15 +4533,21 @@ impl RuleArnsSerializer { ///

 /// Information about a condition for a rule.
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct RuleCondition {
-    /// The name of the field. The possible values are host-header and path-pattern.
+    /// The field in the HTTP request. The following are the possible values:
+    ///   • http-header
+    ///   • http-request-method
+    ///   • host-header
+    ///   • path-pattern
+    ///   • query-string
+    ///   • source-ip
     pub field: Option<String>,
+    /// Information for a host header condition. Specify only when Field is host-header.
+    pub host_header_config: Option<HostHeaderConditionConfig>,
+    /// Information for an HTTP header condition. Specify only when Field is http-header.
+    pub http_header_config: Option<HttpHeaderConditionConfig>,
+    /// Information for an HTTP method condition. Specify only when Field is http-request-method.
+    pub http_request_method_config: Option<HttpRequestMethodConditionConfig>,
+    /// Information for a path pattern condition. Specify only when Field is path-pattern.
+    pub path_pattern_config: Option<PathPatternConditionConfig>,
+    /// Information for a query string condition. Specify only when Field is query-string.
+    pub query_string_config: Option<QueryStringConditionConfig>,
+    /// Information for a source IP condition. Specify only when Field is source-ip.
+    pub source_ip_config: Option<SourceIpConditionConfig>,
-    /// The condition value.
-    /// If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
-    ///   • A-Z, a-z, 0-9
-    ///   • - .
-    ///   • * (matches 0 or more characters)
-    ///   • ? (matches exactly 1 character)
-    /// If the field name is path-pattern, you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
-    ///   • A-Z, a-z, 0-9
-    ///   • _ - . $ / ~ " ' @ : +
-    ///   • & (using &amp;)
-    ///   • * (matches 0 or more characters)
-    ///   • ? (matches exactly 1 character)
+    /// The condition value. You can use Values if the rule contains only host-header and path-pattern conditions. Otherwise, you can use HostHeaderConfig for host-header conditions and PathPatternConfig for path-pattern conditions.
+    /// If Field is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters.
+    ///   • A-Z, a-z, 0-9
+    ///   • - .
+    ///   • * (matches 0 or more characters)
+    ///   • ? (matches exactly 1 character)
+    /// If Field is path-pattern, you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters.
+    ///   • A-Z, a-z, 0-9
+    ///   • _ - . $ / ~ " ' @ : +
+    ///   • & (using &amp;)
+    ///   • * (matches 0 or more characters)
+    ///   • ? (matches exactly 1 character)
pub values: Option>, } @@ -4787,7 +4806,7 @@ impl SecurityGroupsSerializer { #[derive(Default, Debug, Clone, PartialEq)] pub struct SetIpAddressTypeInput { - ///

The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4.

+ ///

The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4. Network Load Balancers must use ipv4.

pub ip_address_type: String, ///

The Amazon Resource Name (ARN) of the load balancer.

pub load_balancer_arn: String, @@ -5014,8 +5033,10 @@ impl SetSubnetsOutputDeserializer { }) } } +///

Information about a source IP condition.

You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy, not the IP address of the client.

#[derive(Default, Debug, Clone, PartialEq)] pub struct SourceIpConditionConfig { + ///

One or more source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

pub values: Option>, } @@ -5800,7 +5821,7 @@ impl TargetGroupsDeserializer { pub struct TargetHealth { ///

A description of the target health that provides additional details. If the state is healthy, a description is not provided.

pub description: Option, - ///

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

  • Elb.RegistrationInProgress - The target is in the process of being registered with the load balancer.

  • Elb.InitialHealthChecking - The load balancer is still sending the target the minimum number of health checks required to determine its health status.

If the target state is unhealthy, the reason code can be one of the following values:

  • Target.ResponseCodeMismatch - The health checks did not return an expected HTTP code.

  • Target.Timeout - The health check requests timed out.

  • Target.FailedHealthChecks - The health checks failed because the connection to the target timed out, the target response was malformed, or the target failed the health check for an unknown reason.

  • Elb.InternalError - The health checks failed due to an internal error.

If the target state is unused, the reason code can be one of the following values:

  • Target.NotRegistered - The target is not registered with the target group.

  • Target.NotInUse - The target group is not used by any load balancer or the target is in an Availability Zone that is not enabled for its load balancer.

  • Target.IpUnusable - The target IP address is reserved for use by a load balancer.

  • Target.InvalidState - The target is in the stopped or terminated state.

If the target state is draining, the reason code can be the following value:

  • Target.DeregistrationInProgress - The target is in the process of being deregistered and the deregistration delay period has not expired.

If the target state is unavailable, the reason code can be the following value:

  • Target.HealthCheckDisabled - Health checks are disabled for the target group.

+ ///

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

  • Elb.RegistrationInProgress - The target is in the process of being registered with the load balancer.

  • Elb.InitialHealthChecking - The load balancer is still sending the target the minimum number of health checks required to determine its health status.

If the target state is unhealthy, the reason code can be one of the following values:

  • Target.ResponseCodeMismatch - The health checks did not return an expected HTTP code.

  • Target.Timeout - The health check requests timed out.

  • Target.FailedHealthChecks - The load balancer received an error while establishing a connection to the target or the target response was malformed.

  • Elb.InternalError - The health checks failed due to an internal error.

If the target state is unused, the reason code can be one of the following values:

  • Target.NotRegistered - The target is not registered with the target group.

  • Target.NotInUse - The target group is not used by any load balancer or the target is in an Availability Zone that is not enabled for its load balancer.

  • Target.IpUnusable - The target IP address is reserved for use by a load balancer.

  • Target.InvalidState - The target is in the stopped or terminated state.

If the target state is draining, the reason code can be the following value:

  • Target.DeregistrationInProgress - The target is in the process of being deregistered and the deregistration delay period has not expired.

If the target state is unavailable, the reason code can be the following value:

  • Target.HealthCheckDisabled - Health checks are disabled for the target group.
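To show how these reason codes surface in practice, here is a minimal sketch that calls describe_target_health and prints each target's state and reason. The target group ARN is a placeholder, the target_health_descriptions field name is an assumption, and the blocking .sync() call follows the RusotoFuture style of this release.

```rust
use rusoto_core::Region;
use rusoto_elbv2::{DescribeTargetHealthInput, Elb, ElbClient};

fn main() {
    let client = ElbClient::new(Region::UsEast1);
    // Placeholder ARN; substitute a real target group ARN.
    let input = DescribeTargetHealthInput {
        target_group_arn: "arn:aws:elasticloadbalancing:...:targetgroup/example".to_owned(),
        ..Default::default()
    };
    let output = client.describe_target_health(input).sync().expect("describe failed");
    for desc in output.target_health_descriptions.unwrap_or_default() {
        if let Some(health) = desc.target_health {
            // `state` is e.g. "unhealthy"; `reason` is a code such as
            // "Target.Timeout" from the lists above.
            println!("state: {:?}, reason: {:?}", health.state, health.reason);
        }
    }
}
```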

pub reason: Option, ///

The state of the target.

pub state: Option, @@ -8436,7 +8457,7 @@ impl Error for SetSubnetsError { } /// Trait representing the capabilities of the Elastic Load Balancing v2 API. Elastic Load Balancing v2 clients implement this trait. pub trait Elb { - ///

Adds the specified certificate to the specified HTTPS listener.

If the certificate was already added, the call is successful but the certificate is not added again.

To list the certificates for your listener, use DescribeListenerCertificates. To remove certificates from your listener, use RemoveListenerCertificates. To specify the default SSL server certificate, use ModifyListener.

+ ///

Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

If the certificate is already in the certificate list, the call is successful but the certificate is not added again.

To get the certificate list for a listener, use DescribeListenerCertificates. To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. To replace the default certificate for a listener, use ModifyListener.

For more information, see SSL Certificates in the Application Load Balancers Guide.
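A minimal sketch of the call described above, adding one certificate to a listener's certificate list; both ARNs are placeholders, and the Certificate field names are assumptions based on the struct defined elsewhere in this file.

```rust
use rusoto_core::Region;
use rusoto_elbv2::{AddListenerCertificatesInput, Certificate, Elb, ElbClient};

fn main() {
    let client = ElbClient::new(Region::UsEast1);
    // Re-running with the same certificate ARN succeeds without adding
    // a duplicate, per the doc comment above.
    let input = AddListenerCertificatesInput {
        listener_arn: "arn:aws:elasticloadbalancing:...:listener/app/example".to_owned(),
        certificates: vec![Certificate {
            certificate_arn: Some("arn:aws:acm:...:certificate/example".to_owned()),
            ..Default::default()
        }],
    };
    client.add_listener_certificates(input).sync().expect("add failed");
}
```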

fn add_listener_certificates( &self, input: AddListenerCertificatesInput, @@ -8505,13 +8526,13 @@ pub trait Elb { input: DescribeAccountLimitsInput, ) -> RusotoFuture; - ///

Describes the certificates for the specified HTTPS listener.

+ ///

Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL Certificates in the Application Load Balancers Guide.

fn describe_listener_certificates( &self, input: DescribeListenerCertificatesInput, ) -> RusotoFuture; - ///

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

+ ///

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

For an HTTPS or TLS listener, the output includes the default certificate for the listener. To describe the certificate list for the listener, use DescribeListenerCertificates.
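A minimal sketch of describing the listeners for one load balancer, per the doc comment above; the ARN is a placeholder and the input field names are assumptions based on the struct defined elsewhere in this file.

```rust
use rusoto_core::Region;
use rusoto_elbv2::{DescribeListenersInput, Elb, ElbClient};

fn main() {
    let client = ElbClient::new(Region::UsEast1);
    // Supply either a load balancer ARN (placeholder here) or listener
    // ARNs; the API rejects requests that specify neither.
    let input = DescribeListenersInput {
        load_balancer_arn: Some(
            "arn:aws:elasticloadbalancing:...:loadbalancer/app/example".to_owned(),
        ),
        ..Default::default()
    };
    let output = client.describe_listeners(input).sync().expect("describe failed");
    for listener in output.listeners.unwrap_or_default() {
        println!("port: {:?}, protocol: {:?}", listener.port, listener.protocol);
    }
}
```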

fn describe_listeners( &self, input: DescribeListenersInput, @@ -8565,7 +8586,7 @@ pub trait Elb { input: DescribeTargetHealthInput, ) -> RusotoFuture; - ///

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and server certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and server certificate properties.

+ ///

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.
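A minimal sketch of the HTTP-to-HTTPS protocol change described above, supplying the security policy and default certificate that the change requires. The ARNs are placeholders, the input field names are assumptions based on the struct defined elsewhere in this file, and ELBSecurityPolicy-2016-08 is just one example policy name.

```rust
use rusoto_core::Region;
use rusoto_elbv2::{Certificate, Elb, ElbClient, ModifyListenerInput};

fn main() {
    let client = ElbClient::new(Region::UsEast1);
    // Changing HTTP -> HTTPS, so a security policy and default
    // certificate must both be supplied.
    let input = ModifyListenerInput {
        listener_arn: "arn:aws:elasticloadbalancing:...:listener/app/example".to_owned(),
        protocol: Some("HTTPS".to_owned()),
        ssl_policy: Some("ELBSecurityPolicy-2016-08".to_owned()),
        certificates: Some(vec![Certificate {
            certificate_arn: Some("arn:aws:acm:...:certificate/example".to_owned()),
            ..Default::default()
        }]),
        ..Default::default()
    };
    client.modify_listener(input).sync().expect("modify failed");
}
```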

fn modify_listener( &self, input: ModifyListenerInput, @@ -8601,7 +8622,7 @@ pub trait Elb { input: RegisterTargetsInput, ) -> RusotoFuture; - ///

Removes the specified certificate from the specified HTTPS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

+ ///

Removes the specified certificate from the certificate list for the specified HTTPS or TLS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

fn remove_listener_certificates( &self, input: RemoveListenerCertificatesInput, @@ -8613,7 +8634,7 @@ pub trait Elb { input: RemoveTagsInput, ) -> RusotoFuture; - ///

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.

Network Load Balancers must use ipv4.

+ ///

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.
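A minimal sketch of the call described above, switching an internet-facing load balancer to dualstack; the ARN is a placeholder, and the input fields match the SetIpAddressTypeInput struct shown earlier in this diff.

```rust
use rusoto_core::Region;
use rusoto_elbv2::{Elb, ElbClient, SetIpAddressTypeInput};

fn main() {
    let client = ElbClient::new(Region::UsEast1);
    // "dualstack" enables IPv4 + IPv6; internal load balancers and
    // Network Load Balancers must stay on "ipv4".
    let input = SetIpAddressTypeInput {
        ip_address_type: "dualstack".to_owned(),
        load_balancer_arn: "arn:aws:elasticloadbalancing:...:loadbalancer/app/example"
            .to_owned(),
    };
    client.set_ip_address_type(input).sync().expect("set failed");
}
```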

fn set_ip_address_type( &self, input: SetIpAddressTypeInput, @@ -8649,10 +8670,7 @@ impl ElbClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ElbClient { - ElbClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -8666,15 +8684,19 @@ impl ElbClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ElbClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ElbClient { + ElbClient { client, region } } } impl Elb for ElbClient { - ///

Adds the specified certificate to the specified HTTPS listener.

If the certificate was already added, the call is successful but the certificate is not added again.

To list the certificates for your listener, use DescribeListenerCertificates. To remove certificates from your listener, use RemoveListenerCertificates. To specify the default SSL server certificate, use ModifyListener.

+ ///

Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

If the certificate is already in the certificate list, the call is successful but the certificate is not added again.

To get the certificate list for a listener, use DescribeListenerCertificates. To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. To replace the default certificate for a listener, use ModifyListener.

For more information, see SSL Certificates in the Application Load Balancers Guide.

fn add_listener_certificates( &self, input: AddListenerCertificatesInput, @@ -8703,7 +8725,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8751,7 +8773,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8799,7 +8821,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8850,7 +8872,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8901,7 +8923,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -8950,7 +8972,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9001,7 +9023,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9052,7 +9074,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9103,7 +9125,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9152,7 +9174,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9203,7 +9225,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let 
_start_document = stack.next(); @@ -9251,7 +9273,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9270,7 +9292,7 @@ impl Elb for ElbClient { }) } - ///

Describes the certificates for the specified HTTPS listener.

+ ///

Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL Certificates in the Application Load Balancers Guide.

fn describe_listener_certificates( &self, input: DescribeListenerCertificatesInput, @@ -9299,7 +9321,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9318,7 +9340,7 @@ impl Elb for ElbClient { }) } - ///

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

+ ///

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

For an HTTPS or TLS listener, the output includes the default certificate for the listener. To describe the certificate list for the listener, use DescribeListenerCertificates.

fn describe_listeners( &self, input: DescribeListenersInput, @@ -9350,7 +9372,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9399,7 +9421,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9447,7 +9469,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9498,7 +9520,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9548,7 +9570,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9599,7 +9621,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9647,7 +9669,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9697,7 +9719,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9747,7 +9769,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9766,7 +9788,7 @@ impl Elb for ElbClient { }) } - ///

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and server certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and server certificate properties.

+ ///

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.

fn modify_listener( &self, input: ModifyListenerInput, @@ -9798,7 +9820,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9846,7 +9868,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9897,7 +9919,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9946,7 +9968,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -9994,7 +10016,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10045,7 +10067,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10064,7 +10086,7 @@ impl Elb for ElbClient { }) } - ///

Removes the specified certificate from the specified HTTPS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

+ ///

Removes the specified certificate from the certificate list for the specified HTTPS or TLS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

fn remove_listener_certificates( &self, input: RemoveListenerCertificatesInput, @@ -10093,7 +10115,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10144,7 +10166,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10161,7 +10183,7 @@ impl Elb for ElbClient { }) } - ///

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.

Network Load Balancers must use ipv4.

+ ///

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.

fn set_ip_address_type( &self, input: SetIpAddressTypeInput, @@ -10193,7 +10215,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10244,7 +10266,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10295,7 +10317,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -10346,7 +10368,7 @@ impl Elb for ElbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/elbv2/src/lib.rs b/rusoto/services/elbv2/src/lib.rs index 35368cfe902..9fd1d5f3dc1 100644 --- a/rusoto/services/elbv2/src/lib.rs +++ b/rusoto/services/elbv2/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC).

A Classic Load Balancer makes routing and load balancing decisions either at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and supports either EC2-Classic or a VPC. For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2015-12-01 API, which supports Application Load Balancers and Network Load Balancers. The 2012-06-01 API supports Classic Load Balancers.

To get started, complete the following tasks:

  1. Create a load balancer using CreateLoadBalancer.

  2. Create a target group using CreateTargetGroup.

  3. Register targets for the target group using RegisterTargets.

  4. Create one or more listeners for your load balancer using CreateListener.

To delete a load balancer and its related resources, complete the following tasks:

  1. Delete the load balancer using DeleteLoadBalancer.

  2. Delete the target group using DeleteTargetGroup.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

+//!

Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. This reference covers Application Load Balancers and Network Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC). For more information, see the Elastic Load Balancing User Guide.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

//! //! If you're using the service, you're probably looking for [ElbClient](struct.ElbClient.html) and [Elb](trait.Elb.html). diff --git a/rusoto/services/emr/Cargo.toml b/rusoto/services/emr/Cargo.toml index 352186fe5dc..cd0649086aa 100644 --- a/rusoto/services/emr/Cargo.toml +++ b/rusoto/services/emr/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_emr" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/emr/README.md b/rusoto/services/emr/README.md index 717eea39556..a75af603a3e 100644 --- a/rusoto/services/emr/README.md +++ b/rusoto/services/emr/README.md @@ -23,9 +23,16 @@ To use `rusoto_emr` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_emr = "0.40.0" +rusoto_emr = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/emr/src/custom/mod.rs b/rusoto/services/emr/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/emr/src/custom/mod.rs +++ b/rusoto/services/emr/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/emr/src/generated.rs b/rusoto/services/emr/src/generated.rs index eb6ffe5c51f..99a68d725b3 100644 --- a/rusoto/services/emr/src/generated.rs +++ b/rusoto/services/emr/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -35,7 +34,7 @@ pub struct AddInstanceFleetInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddInstanceFleetOutput { ///

The unique identifier of the cluster.
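The cfg_attr change above is what the new serialize_structs crate feature (listed in the README hunk earlier) toggles. A minimal sketch, assuming the feature is enabled in Cargo.toml and serde_json is available as a dependency:

```rust
// Assumes Cargo.toml enables the feature, e.g.:
// rusoto_emr = { version = "0.41.0", features = ["serialize_structs"] }
use rusoto_emr::AddInstanceFleetOutput;

fn main() {
    let output = AddInstanceFleetOutput {
        cluster_id: Some("j-EXAMPLE".to_owned()),
        ..Default::default()
    };
    // With the feature on, output structs derive Serialize.
    println!("{}", serde_json::to_string(&output).expect("serialize failed"));
}
```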

#[serde(rename = "ClusterId")] @@ -60,7 +59,7 @@ pub struct AddInstanceGroupsInput { ///

Output from an AddInstanceGroups call.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddInstanceGroupsOutput { ///

Instance group IDs of the newly created instance groups.

#[serde(rename = "InstanceGroupIds")] @@ -85,7 +84,7 @@ pub struct AddJobFlowStepsInput { ///

The output for the AddJobFlowSteps operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddJobFlowStepsOutput { ///

The identifiers of the list of steps added to the job flow.

#[serde(rename = "StepIds")] @@ -106,7 +105,7 @@ pub struct AddTagsInput { ///

This output indicates the result of adding tags to a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsOutput {} ///

With Amazon EMR release version 4.0 and later, the only accepted parameter is the application name. To pass arguments to applications, you use configuration classifications specified using configuration JSON objects. For more information, see Configuring Applications.

With earlier Amazon EMR releases, the application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument.

@@ -143,7 +142,7 @@ pub struct AutoScalingPolicy { ///

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingPolicyDescription { ///

The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.

#[serde(rename = "Constraints")] @@ -161,7 +160,7 @@ pub struct AutoScalingPolicyDescription { ///

The reason for an AutoScalingPolicyStatus change.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingPolicyStateChangeReason { ///

The code indicating the reason for the change in status. USER_REQUEST indicates that the scaling policy status was changed by a user. PROVISION_FAILURE indicates that the status change was because the policy failed to provision. CLEANUP_FAILURE indicates an error.

#[serde(rename = "Code")] @@ -175,7 +174,7 @@ pub struct AutoScalingPolicyStateChangeReason { ///

The status of an automatic scaling policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutoScalingPolicyStatus { ///

Indicates the status of the automatic scaling policy.

#[serde(rename = "State")] @@ -187,6 +186,30 @@ pub struct AutoScalingPolicyStatus { pub state_change_reason: Option, } +///

A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules is set to true, Amazon EMR prevents cluster creation if one of the cluster's security groups has a rule that allows inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockPublicAccessConfiguration { + ///

Indicates whether EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

+ #[serde(rename = "BlockPublicSecurityGroupRules")] + pub block_public_security_group_rules: bool, + ///

Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.

By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges.
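A minimal sketch of enabling block public access while keeping the default SSH exception. PutBlockPublicAccessConfigurationInput and the corresponding trait method are assumptions inferred from the action name in the doc text above; they are not shown in this hunk, and the PortRange field names are likewise assumed.

```rust
use rusoto_core::Region;
use rusoto_emr::{
    BlockPublicAccessConfiguration, Emr, EmrClient, PortRange,
    PutBlockPublicAccessConfigurationInput,
};

fn main() {
    let client = EmrClient::new(Region::UsEast1);
    // Block public inbound rules, but keep Port 22 (SSH) as an exception.
    let input = PutBlockPublicAccessConfigurationInput {
        block_public_access_configuration: BlockPublicAccessConfiguration {
            block_public_security_group_rules: true,
            permitted_public_security_group_rule_ranges: Some(vec![PortRange {
                min_range: 22,
                max_range: Some(22),
            }]),
        },
    };
    client
        .put_block_public_access_configuration(input)
        .sync()
        .expect("put failed");
}
```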

+ #[serde(rename = "PermittedPublicSecurityGroupRuleRanges")] + #[serde(skip_serializing_if = "Option::is_none")] + pub permitted_public_security_group_rule_ranges: Option>, +} + +///

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct BlockPublicAccessConfigurationMetadata { + ///

The Amazon Resource Name that created or last modified the configuration.

+ #[serde(rename = "CreatedByArn")] + pub created_by_arn: String, + ///

The date and time that the configuration was created.

+ #[serde(rename = "CreationDateTime")] + pub creation_date_time: f64, +} + ///

Configuration of a bootstrap action.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BootstrapActionConfig { @@ -200,7 +223,7 @@ pub struct BootstrapActionConfig { ///

Reports the configuration of a bootstrap action in a cluster (job flow).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BootstrapActionDetail { ///

A description of the bootstrap action.

#[serde(rename = "BootstrapActionConfig")] @@ -210,7 +233,7 @@ pub struct BootstrapActionDetail { ///

Specification of the status of a CancelSteps request. Available only in Amazon EMR version 4.8.0 and later, excluding version 5.0.0.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelStepsInfo { ///

The reason for the failure if the CancelSteps request fails.

#[serde(rename = "Reason")] @@ -241,7 +264,7 @@ pub struct CancelStepsInput { ///

The output for the CancelSteps operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelStepsOutput { ///

A list of CancelStepsInfo, which shows the status of specified cancel requests for each StepID specified.

#[serde(rename = "CancelStepsInfoList")] @@ -288,7 +311,7 @@ pub struct CloudWatchAlarmDefinition { ///

The detailed description of the cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Cluster { ///

The applications installed on this cluster.

#[serde(rename = "Applications")] @@ -346,7 +369,7 @@ pub struct Cluster { #[serde(rename = "NormalizedInstanceHours")] #[serde(skip_serializing_if = "Option::is_none")] pub normalized_instance_hours: Option, - ///

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases versions 4.x and later. Earlier versions use AmiVersion.

+ ///

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

#[serde(rename = "ReleaseLabel")] #[serde(skip_serializing_if = "Option::is_none")] pub release_label: Option, @@ -386,7 +409,7 @@ pub struct Cluster { #[serde(rename = "TerminationProtected")] #[serde(skip_serializing_if = "Option::is_none")] pub termination_protected: Option, - ///

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

+ ///

This member will be deprecated.

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

#[serde(rename = "VisibleToAllUsers")] #[serde(skip_serializing_if = "Option::is_none")] pub visible_to_all_users: Option, @@ -394,7 +417,7 @@ pub struct Cluster { ///

The reason that the cluster changed to its current state.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterStateChangeReason { ///

The programmatic code for the state change reason.

#[serde(rename = "Code")] @@ -408,7 +431,7 @@ pub struct ClusterStateChangeReason { ///

The detailed status of the cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterStatus { ///

The current state of the cluster.

#[serde(rename = "State")] @@ -426,7 +449,7 @@ pub struct ClusterStatus { ///

The summary description of the cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterSummary { ///

The unique identifier for the cluster.

#[serde(rename = "Id")] @@ -448,7 +471,7 @@ pub struct ClusterSummary { ///

Represents the timeline of the cluster's lifecycle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterTimeline { ///

The creation date and time of the cluster.

#[serde(rename = "CreationDateTime")] @@ -466,7 +489,7 @@ pub struct ClusterTimeline { ///

An entity describing an executable that runs on a cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Command { ///

Arguments for Amazon EMR to pass to the command for execution.

#[serde(rename = "Args")] @@ -510,7 +533,7 @@ pub struct CreateSecurityConfigurationInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSecurityConfigurationOutput { ///

The date and time the security configuration was created.

#[serde(rename = "CreationDateTime")] @@ -528,7 +551,7 @@ pub struct DeleteSecurityConfigurationInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSecurityConfigurationOutput {} ///

This input determines which cluster to describe.

@@ -541,7 +564,7 @@ pub struct DescribeClusterInput { ///

This output contains the description of the cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClusterOutput { ///

This output contains the details for the requested cluster.

#[serde(rename = "Cluster")] @@ -572,7 +595,7 @@ pub struct DescribeJobFlowsInput { ///

The output for the DescribeJobFlows operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobFlowsOutput { ///

A list of job flows matching the parameters supplied.

#[serde(rename = "JobFlows")] @@ -588,7 +611,7 @@ pub struct DescribeSecurityConfigurationInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSecurityConfigurationOutput { ///

The date and time the security configuration was created.

#[serde(rename = "CreationDateTime")] @@ -617,7 +640,7 @@ pub struct DescribeStepInput { ///

This output contains the description of the cluster step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStepOutput { ///

The step details for the requested step identifier.

#[serde(rename = "Step")] @@ -627,7 +650,7 @@ pub struct DescribeStepOutput { ///

Configuration of requested EBS block device associated with the instance group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EbsBlockDevice { ///

The device name that is exposed to the instance, such as /dev/sdh.

#[serde(rename = "Device")] @@ -666,7 +689,7 @@ pub struct EbsConfiguration { ///

EBS block device that's attached to an EC2 instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EbsVolume { ///

The device name that is exposed to the instance, such as /dev/sdh.

#[serde(rename = "Device")] @@ -680,7 +703,7 @@ pub struct EbsVolume { ///

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Ec2InstanceAttributes { ///

A list of additional Amazon EC2 security group IDs for the master node.

#[serde(rename = "AdditionalMasterSecurityGroups")] @@ -698,7 +721,7 @@ pub struct Ec2InstanceAttributes { #[serde(rename = "Ec2KeyName")] #[serde(skip_serializing_if = "Option::is_none")] pub ec_2_key_name: Option, - ///

To launch the cluster in Amazon VPC, set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster is launched in the normal AWS cloud, outside of a VPC.

Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance type for nodes of a cluster launched in a VPC.

+ ///

Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.

#[serde(rename = "Ec2SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub ec_2_subnet_id: Option, @@ -718,7 +741,7 @@ pub struct Ec2InstanceAttributes { #[serde(rename = "RequestedEc2AvailabilityZones")] #[serde(skip_serializing_if = "Option::is_none")] pub requested_ec_2_availability_zones: Option>, - ///

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

+ ///

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

#[serde(rename = "RequestedEc2SubnetIds")] #[serde(skip_serializing_if = "Option::is_none")] pub requested_ec_2_subnet_ids: Option>, @@ -730,7 +753,7 @@ pub struct Ec2InstanceAttributes { ///

The details of the step failure. The service attempts to detect the root cause for many common failures.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailureDetails { ///

The path to the log file where the step failure root cause was originally recorded.

#[serde(rename = "LogFile")] @@ -746,6 +769,20 @@ pub struct FailureDetails { pub reason: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetBlockPublicAccessConfigurationInput {} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetBlockPublicAccessConfigurationOutput { + ///

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

+ #[serde(rename = "BlockPublicAccessConfiguration")] + pub block_public_access_configuration: BlockPublicAccessConfiguration, + ///

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
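A minimal sketch of reading the configuration back; the get_block_public_access_configuration method name is an assumption inferred from the input and output types in this hunk.

```rust
use rusoto_core::Region;
use rusoto_emr::{Emr, EmrClient, GetBlockPublicAccessConfigurationInput};

fn main() {
    let client = EmrClient::new(Region::UsEast1);
    let output = client
        .get_block_public_access_configuration(GetBlockPublicAccessConfigurationInput {})
        .sync()
        .expect("get failed");
    // Print whether block public access is on, and who last changed it.
    println!(
        "enabled: {}, last modified by: {}",
        output
            .block_public_access_configuration
            .block_public_security_group_rules,
        output
            .block_public_access_configuration_metadata
            .created_by_arn
    );
}
```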

+ #[serde(rename = "BlockPublicAccessConfigurationMetadata")] + pub block_public_access_configuration_metadata: BlockPublicAccessConfigurationMetadata, +} + ///

A job flow step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.
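A minimal sketch of submitting one JAR-based step via AddJobFlowSteps, using the struct described above; the bucket, class, and cluster ID are placeholders, and the StepConfig field names are assumptions based on the structs defined elsewhere in this file.

```rust
use rusoto_core::Region;
use rusoto_emr::{AddJobFlowStepsInput, Emr, EmrClient, HadoopJarStepConfig, StepConfig};

fn main() {
    let client = EmrClient::new(Region::UsEast1);
    // One step: run example.jar's main class, continuing on failure.
    let step = StepConfig {
        name: "example-step".to_owned(),
        action_on_failure: Some("CONTINUE".to_owned()),
        hadoop_jar_step: HadoopJarStepConfig {
            jar: "s3://example-bucket/jobs/example.jar".to_owned(),
            main_class: Some("com.example.Main".to_owned()),
            args: Some(vec!["--input".to_owned(), "s3://example-bucket/in/".to_owned()]),
            ..Default::default()
        },
    };
    let input = AddJobFlowStepsInput {
        job_flow_id: "j-EXAMPLE".to_owned(),
        steps: vec![step],
    };
    let output = client.add_job_flow_steps(input).sync().expect("add steps failed");
    println!("{:?}", output.step_ids);
}
```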

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct HadoopJarStepConfig { @@ -768,7 +805,7 @@ pub struct HadoopJarStepConfig { ///

A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HadoopStepConfig { ///

The list of command line arguments to pass to the JAR file's main function for execution.

#[serde(rename = "Args")] @@ -790,7 +827,7 @@ pub struct HadoopStepConfig { ///

Represents an EC2 instance provisioned as part of cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Instance { ///

The list of EBS volumes that are attached to this instance.

#[serde(rename = "EbsVolumes")] @@ -844,7 +881,7 @@ pub struct Instance { ///

Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot instances, which are provisioned to meet a defined target capacity.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceFleet { ///

The unique identifier of the instance fleet.

#[serde(rename = "Id")] @@ -942,7 +979,7 @@ pub struct InstanceFleetProvisioningSpecifications { ///

Provides status change reason details for the instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceFleetStateChangeReason { ///

A code corresponding to the reason the state change occurred.

#[serde(rename = "Code")] @@ -956,7 +993,7 @@ pub struct InstanceFleetStateChangeReason { ///

The status of the instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceFleetStatus { ///

A code representing the instance fleet status.

  • PROVISIONING—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.

  • BOOTSTRAPPING—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.

  • RUNNING—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.

  • RESIZING—A resize operation is underway. EC2 instances are either being added or removed.

  • SUSPENDED—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.

  • TERMINATING—The instance fleet is terminating EC2 instances.

  • TERMINATED—The instance fleet is no longer active, and all EC2 instances have been terminated.

#[serde(rename = "State")] @@ -974,7 +1011,7 @@ pub struct InstanceFleetStatus { ///

Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceFleetTimeline { ///

The time and date the instance fleet was created.

#[serde(rename = "CreationDateTime")] @@ -992,7 +1029,7 @@ pub struct InstanceFleetTimeline { ///

This entity represents an instance group, which is a group of instances that have common purpose. For example, CORE instance group is used for HDFS.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceGroup { ///

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

#[serde(rename = "AutoScalingPolicy")] @@ -1104,7 +1141,7 @@ pub struct InstanceGroupConfig { ///

Detailed information about an instance group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceGroupDetail { ///

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specified in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

#[serde(rename = "BidPrice")] @@ -1183,7 +1220,7 @@ pub struct InstanceGroupModifyConfig { ///

The status change reason details for the instance group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceGroupStateChangeReason { ///

The programmable code for the state change reason.

#[serde(rename = "Code")] @@ -1197,7 +1234,7 @@ pub struct InstanceGroupStateChangeReason { ///

The details of the instance group status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceGroupStatus { ///

The current state of the instance group.

#[serde(rename = "State")] @@ -1215,7 +1252,7 @@ pub struct InstanceGroupStatus { ///

The timeline of the instance group lifecycle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceGroupTimeline { ///

The creation date and time of the instance group.

#[serde(rename = "CreationDateTime")] @@ -1250,7 +1287,7 @@ pub struct InstanceResizePolicy { ///

The details of the status change reason for the instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceStateChangeReason { ///

The programmable code for the state change reason.

#[serde(rename = "Code")] @@ -1264,7 +1301,7 @@ pub struct InstanceStateChangeReason { ///

The instance status details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceStatus { ///

The current state of the instance.

#[serde(rename = "State")] @@ -1282,7 +1319,7 @@ pub struct InstanceStatus { ///

The timeline of the instance lifecycle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceTimeline { ///

The creation date and time of the instance.

#[serde(rename = "CreationDateTime")] @@ -1328,7 +1365,7 @@ pub struct InstanceTypeConfig { ///

The configuration specification for each instance type in an instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceTypeSpecification { ///

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD.

#[serde(rename = "BidPrice")] @@ -1362,7 +1399,7 @@ pub struct InstanceTypeSpecification { ///

A description of a cluster (job flow).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobFlowDetail { ///

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

#[serde(rename = "AmiVersion")] @@ -1412,7 +1449,7 @@ pub struct JobFlowDetail { #[serde(rename = "SupportedProducts")] #[serde(skip_serializing_if = "Option::is_none")] pub supported_products: Option>, - ///

Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

+ ///

This member will be deprecated.

Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

#[serde(rename = "VisibleToAllUsers")] #[serde(skip_serializing_if = "Option::is_none")] pub visible_to_all_users: Option, @@ -1420,7 +1457,7 @@ pub struct JobFlowDetail { ///

Describes the status of the cluster (job flow).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobFlowExecutionStatusDetail { ///

The creation date and time of the job flow.

#[serde(rename = "CreationDateTime")] @@ -1461,7 +1498,7 @@ pub struct JobFlowInstancesConfig { #[serde(rename = "Ec2KeyName")] #[serde(skip_serializing_if = "Option::is_none")] pub ec_2_key_name: Option, - ///

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster launches in the normal Amazon Web Services cloud, outside of an Amazon VPC, if the account launching the cluster supports EC2 Classic networks in the region where the cluster launches.

Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type for clusters launched in an Amazon VPC.

+ ///

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.

#[serde(rename = "Ec2SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub ec_2_subnet_id: Option, @@ -1521,7 +1558,7 @@ pub struct JobFlowInstancesConfig { ///

Specify the type of Amazon EC2 instances that the cluster (job flow) runs on.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobFlowInstancesDetail { ///

The name of an Amazon EC2 key pair that can be used to ssh to the master node.

#[serde(rename = "Ec2KeyName")] @@ -1624,7 +1661,7 @@ pub struct ListBootstrapActionsInput { ///

This output contains the bootstrap actions detail.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBootstrapActionsOutput { ///

The bootstrap actions associated with the cluster.

#[serde(rename = "BootstrapActions")] @@ -1659,7 +1696,7 @@ pub struct ListClustersInput { ///

This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClustersOutput { ///

The list of clusters for the account based on the given filters.

#[serde(rename = "Clusters")] @@ -1683,7 +1720,7 @@ pub struct ListInstanceFleetsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInstanceFleetsOutput { ///

The list of instance fleets for the cluster and given filters.

#[serde(rename = "InstanceFleets")] @@ -1709,7 +1746,7 @@ pub struct ListInstanceGroupsInput { ///

This input determines which instance groups to retrieve.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInstanceGroupsOutput { ///

The list of instance groups for the cluster and given filters.

#[serde(rename = "InstanceGroups")] @@ -1755,7 +1792,7 @@ pub struct ListInstancesInput { ///

This output contains the list of instances.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInstancesOutput { ///

The list of instances for the cluster and given filters.

#[serde(rename = "Instances")] @@ -1776,7 +1813,7 @@ pub struct ListSecurityConfigurationsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecurityConfigurationsOutput { ///

A pagination token that indicates the next set of results to retrieve. Include the marker in the next ListSecurityConfigurations call to retrieve the next page of results, if required.

#[serde(rename = "Marker")] @@ -1810,7 +1847,7 @@ pub struct ListStepsInput { ///

This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStepsOutput { ///

The pagination token that indicates the next set of results to retrieve.

#[serde(rename = "Marker")] @@ -1871,6 +1908,18 @@ pub struct PlacementType { pub availability_zones: Option>, } +///

A port range that is permitted to allow inbound traffic from all public IP addresses. To specify a single port, use the same value for MinRange and MaxRange.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PortRange { + ///

The largest port number in a specified range of port numbers.

+ #[serde(rename = "MaxRange")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_range: Option, + ///

The smallest port number in a specified range of port numbers.

+ #[serde(rename = "MinRange")] + pub min_range: i64, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutAutoScalingPolicyInput { ///

Specifies the definition of the automatic scaling policy.

@@ -1885,7 +1934,7 @@ pub struct PutAutoScalingPolicyInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutAutoScalingPolicyOutput { ///

The automatic scaling policy definition.

#[serde(rename = "AutoScalingPolicy")] @@ -1901,6 +1950,17 @@ pub struct PutAutoScalingPolicyOutput { pub instance_group_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutBlockPublicAccessConfigurationInput { + ///

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.
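For orientation, a hedged sketch of building this request follows. The field names on `BlockPublicAccessConfiguration` (`block_public_security_group_rules`, `permitted_public_security_group_rule_ranges`) and its `Default` derive are assumptions inferred from this description and the `PortRange` struct above; the struct's definition sits outside this hunk.

```rust
use rusoto_emr::{
    BlockPublicAccessConfiguration, PortRange, PutBlockPublicAccessConfigurationInput,
};

fn build_request() -> PutBlockPublicAccessConfigurationInput {
    PutBlockPublicAccessConfigurationInput {
        block_public_access_configuration: BlockPublicAccessConfiguration {
            // Turn blocking on (assumed field name)...
            block_public_security_group_rules: true,
            // ...while keeping the documented port-22 (SSH) exception.
            permitted_public_security_group_rule_ranges: Some(vec![PortRange {
                min_range: 22,
                max_range: Some(22),
            }]),
            // Assumes the struct derives Default, as the generated structs here do.
            ..Default::default()
        },
    }
}
```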

+ #[serde(rename = "BlockPublicAccessConfiguration")] + pub block_public_access_configuration: BlockPublicAccessConfiguration, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutBlockPublicAccessConfigurationOutput {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RemoveAutoScalingPolicyInput { ///

Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.

@@ -1912,7 +1972,7 @@ pub struct RemoveAutoScalingPolicyInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveAutoScalingPolicyOutput {} ///

This input identifies a cluster and a list of tags to remove.

@@ -1928,7 +1988,7 @@ pub struct RemoveTagsInput { ///

This output indicates the result of removing tags from a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsOutput {} ///

Input to the RunJobFlow operation.

@@ -1988,7 +2048,7 @@ pub struct RunJobFlowInput { #[serde(rename = "NewSupportedProducts")] #[serde(skip_serializing_if = "Option::is_none")] pub new_supported_products: Option>, - ///

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases versions 4.x and later. Earlier versions use AmiVersion.

+ ///

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

#[serde(rename = "ReleaseLabel")] #[serde(skip_serializing_if = "Option::is_none")] pub release_label: Option, @@ -2020,7 +2080,7 @@ pub struct RunJobFlowInput { #[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, - ///

Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it.

+ ///

This member will be deprecated.

Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it.

#[serde(rename = "VisibleToAllUsers")] #[serde(skip_serializing_if = "Option::is_none")] pub visible_to_all_users: Option, @@ -2028,7 +2088,7 @@ pub struct RunJobFlowInput { ///

The result of the RunJobFlow operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RunJobFlowOutput { ///

A unique identifier for the job flow.

#[serde(rename = "JobFlowId")] @@ -2099,7 +2159,7 @@ pub struct ScriptBootstrapActionConfig { ///

The creation date and time, and name, of a security configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityConfigurationSummary { ///

The date and time the security configuration was created.

#[serde(rename = "CreationDateTime")] @@ -2122,13 +2182,13 @@ pub struct SetTerminationProtectionInput { pub termination_protected: bool, } -///

The input to the SetVisibleToAllUsers action.

+///

This member will be deprecated.

The input to the SetVisibleToAllUsers action.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SetVisibleToAllUsersInput { ///

Identifiers of the job flows to receive the new visibility setting.

#[serde(rename = "JobFlowIds")] pub job_flow_ids: Vec, - ///

Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.

+ ///

This member will be deprecated.

Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.

#[serde(rename = "VisibleToAllUsers")] pub visible_to_all_users: bool, } @@ -2179,7 +2239,7 @@ pub struct SpotProvisioningSpecification { ///

This represents a step in a cluster.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Step { ///

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

#[serde(rename = "ActionOnFailure")] @@ -2220,7 +2280,7 @@ pub struct StepConfig { ///

Combines the execution state and configuration of a step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepDetail { ///

The description of the step status.

#[serde(rename = "ExecutionStatusDetail")] @@ -2232,7 +2292,7 @@ pub struct StepDetail { ///

The execution state of a step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepExecutionStatusDetail { ///

The creation date and time of the step.

#[serde(rename = "CreationDateTime")] @@ -2256,7 +2316,7 @@ pub struct StepExecutionStatusDetail { ///

The details of the step state change reason.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepStateChangeReason { ///

The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.

#[serde(rename = "Code")] @@ -2270,7 +2330,7 @@ pub struct StepStateChangeReason { ///

The execution status details of the cluster step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepStatus { ///

The details for the step failure including reason, message, and log file path where the root cause was identified.

#[serde(rename = "FailureDetails")] @@ -2292,7 +2352,7 @@ pub struct StepStatus { ///

The summary of the cluster step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepSummary { ///

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

#[serde(rename = "ActionOnFailure")] @@ -2318,7 +2378,7 @@ pub struct StepSummary { ///

The timeline of the cluster step lifecycle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepTimeline { ///

The date and time when the cluster step was created.

#[serde(rename = "CreationDateTime")] @@ -2816,6 +2876,51 @@ impl Error for DescribeStepError { } } } +/// Errors returned by GetBlockPublicAccessConfiguration +#[derive(Debug, PartialEq)] +pub enum GetBlockPublicAccessConfigurationError { + ///

This exception occurs when there is an internal failure in the EMR service.

+ InternalServer(String), + ///

This exception occurs when there is something wrong with user input.

+ InvalidRequest(String), +} + +impl GetBlockPublicAccessConfigurationError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServerException" => { + return RusotoError::Service( + GetBlockPublicAccessConfigurationError::InternalServer(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + GetBlockPublicAccessConfigurationError::InvalidRequest(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetBlockPublicAccessConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetBlockPublicAccessConfigurationError { + fn description(&self) -> &str { + match *self { + GetBlockPublicAccessConfigurationError::InternalServer(ref cause) => cause, + GetBlockPublicAccessConfigurationError::InvalidRequest(ref cause) => cause, + } + } +} /// Errors returned by ListBootstrapActions #[derive(Debug, PartialEq)] pub enum ListBootstrapActionsError { @@ -3194,6 +3299,51 @@ impl Error for PutAutoScalingPolicyError { match *self {} } } +/// Errors returned by PutBlockPublicAccessConfiguration +#[derive(Debug, PartialEq)] +pub enum PutBlockPublicAccessConfigurationError { + ///

This exception occurs when there is an internal failure in the EMR service.

+ InternalServer(String), + ///

This exception occurs when there is something wrong with user input.

+ InvalidRequest(String), +} + +impl PutBlockPublicAccessConfigurationError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServerException" => { + return RusotoError::Service( + PutBlockPublicAccessConfigurationError::InternalServer(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + PutBlockPublicAccessConfigurationError::InvalidRequest(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutBlockPublicAccessConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutBlockPublicAccessConfigurationError { + fn description(&self) -> &str { + match *self { + PutBlockPublicAccessConfigurationError::InternalServer(ref cause) => cause, + PutBlockPublicAccessConfigurationError::InvalidRequest(ref cause) => cause, + } + } +} /// Errors returned by RemoveAutoScalingPolicy #[derive(Debug, PartialEq)] pub enum RemoveAutoScalingPolicyError {} @@ -3461,6 +3611,11 @@ pub trait Emr { input: DescribeStepInput, ) -> RusotoFuture; + ///

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
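A minimal usage sketch, assuming default credentials, the `rusoto_emr` crate, and the blocking `.sync()` adapter on `RusotoFuture`; the call takes no input, as the trait signature below shows:

```rust
use rusoto_core::Region;
use rusoto_emr::{Emr, EmrClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = EmrClient::new(Region::UsEast1);
    // Fetch the account-wide block public access configuration
    // for the client's region.
    let resp = client.get_block_public_access_configuration().sync()?;
    println!("{:?}", resp);
    Ok(())
}
```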

+ fn get_block_public_access_configuration( + &self, + ) -> RusotoFuture; + ///

Provides information about the bootstrap actions associated with a cluster.

fn list_bootstrap_actions( &self, @@ -3518,6 +3673,12 @@ pub trait Emr { input: PutAutoScalingPolicyInput, ) -> RusotoFuture; + ///

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

+ fn put_block_public_access_configuration( + &self, + input: PutBlockPublicAccessConfigurationInput, + ) -> RusotoFuture; + ///

Removes an automatic scaling policy from a specified instance group within an EMR cluster.

fn remove_auto_scaling_policy( &self, @@ -3542,7 +3703,7 @@ pub trait Emr { input: SetTerminationProtectionInput, ) -> RusotoFuture<(), SetTerminationProtectionError>; - ///

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

+ ///

This member will be deprecated.

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

fn set_visible_to_all_users( &self, input: SetVisibleToAllUsersInput, @@ -3566,10 +3727,7 @@ impl EmrClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> EmrClient { - EmrClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3583,10 +3741,14 @@ impl EmrClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - EmrClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EmrClient { + EmrClient { client, region } } } @@ -3906,6 +4068,36 @@ impl Emr for EmrClient { }) } + ///

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

+ fn get_block_public_access_configuration( + &self, + ) -> RusotoFuture + { + let mut request = SignedRequest::new("POST", "elasticmapreduce", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "ElasticMapReduce.GetBlockPublicAccessConfiguration", + ); + request.set_payload(Some(bytes::Bytes::from_static(b"{}"))); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(GetBlockPublicAccessConfigurationError::from_response( + response, + )) + })) + } + }) + } + ///

Provides information about the bootstrap actions associated with a cluster.

fn list_bootstrap_actions( &self, @@ -4182,6 +4374,38 @@ impl Emr for EmrClient { }) } + ///

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

+ fn put_block_public_access_configuration( + &self, + input: PutBlockPublicAccessConfigurationInput, + ) -> RusotoFuture + { + let mut request = SignedRequest::new("POST", "elasticmapreduce", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "ElasticMapReduce.PutBlockPublicAccessConfiguration", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(PutBlockPublicAccessConfigurationError::from_response( + response, + )) + })) + } + }) + } + ///

Removes an automatic scaling policy from a specified instance group within an EMR cluster.

fn remove_auto_scaling_policy( &self, @@ -4289,7 +4513,7 @@ impl Emr for EmrClient { }) } - ///

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

+ ///

This member will be deprecated.

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

fn set_visible_to_all_users( &self, input: SetVisibleToAllUsersInput, diff --git a/rusoto/services/events/Cargo.toml b/rusoto/services/events/Cargo.toml index b6b5c405048..7a9dc5f2539 100644 --- a/rusoto/services/events/Cargo.toml +++ b/rusoto/services/events/Cargo.toml @@ -1,13 +1,13 @@ [package] authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] -description = "AWS SDK for Rust - Amazon CloudWatch Events @ 2015-10-07" +description = "AWS SDK for Rust - Amazon EventBridge @ 2015-10-07" documentation = "https://docs.rs/rusoto_events" keywords = ["AWS", "Amazon", "events"] license = "MIT" name = "rusoto_events" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/events/README.md b/rusoto/services/events/README.md index d36b8c4d92f..536e33349db 100644 --- a/rusoto/services/events/README.md +++ b/rusoto/services/events/README.md @@ -1,6 +1,6 @@ -# Rusoto CloudWatchEvents -Rust SDK for Amazon CloudWatch Events +# Rusoto EventBridge +Rust SDK for Amazon EventBridge You may be looking for: @@ -23,9 +23,16 @@ To use `rusoto_events` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_events = "0.40.0" +rusoto_events = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/events/src/custom/mod.rs b/rusoto/services/events/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/events/src/custom/mod.rs +++ b/rusoto/services/events/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/events/src/generated.rs b/rusoto/services/events/src/generated.rs index 9af222bc4e9..07dab35c880 100644 --- a/rusoto/services/events/src/generated.rs +++ b/rusoto/services/events/src/generated.rs @@ -9,29 +9,35 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; -///

This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ActivateEventSourceRequest { + ///

The name of the partner event source to activate.

+ #[serde(rename = "Name")] + pub name: String, +} + +///

This structure specifies the VPC subnets and security groups for the task and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AwsVpcConfiguration { ///

Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE.

#[serde(rename = "AssignPublicIp")] #[serde(skip_serializing_if = "Option::is_none")] pub assign_public_ip: Option, - ///

Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.

+ ///

Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you don't specify a security group, the default security group for the VPC is used.

#[serde(rename = "SecurityGroups")] #[serde(skip_serializing_if = "Option::is_none")] pub security_groups: Option>, @@ -62,13 +68,13 @@ pub struct BatchParameters { ///

The name to use for this execution of the job, if the target is an AWS Batch job.

#[serde(rename = "JobName")] pub job_name: String, - ///

The retry strategy to use for failed jobs, if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

+ ///

The retry strategy to use for failed jobs if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

#[serde(rename = "RetryStrategy")] #[serde(skip_serializing_if = "Option::is_none")] pub retry_strategy: Option, } -///

The retry strategy to use for failed jobs, if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

+///

The retry strategy to use for failed jobs if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BatchRetryStrategy { ///

The number of times to attempt to retry, if the job fails. Valid values are 1–10.

@@ -77,22 +83,89 @@ pub struct BatchRetryStrategy { pub attempts: Option, } -///

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. Following is an example value for Condition:

'{"Type" : "StringEquals", "Key": "aws:PrincipalOrgID", "Value": "o-1234567890"}'

+///

A JSON string that you can use to limit the event bus permissions that you're granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. The following is an example value for Condition:

'{"Type" : "StringEquals", "Key": "aws:PrincipalOrgID", "Value": "o-1234567890"}'

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Condition { - ///

Specifies the key for the condition. Currently the only supported key is aws:PrincipalOrgID.

+ ///

The key for the condition. Currently, the only supported key is aws:PrincipalOrgID.

#[serde(rename = "Key")] pub key: String, - ///

Specifies the type of condition. Currently the only supported value is StringEquals.

+ ///

The type of condition. Currently, the only supported value is StringEquals.

#[serde(rename = "Type")] pub type_: String, - ///

Specifies the value for the key. Currently, this must be the ID of the organization.

+ ///

The value for the key. Currently, this must be the ID of the organization.

#[serde(rename = "Value")] pub value: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateEventBusRequest { + ///

If you're creating a partner event bus, this specifies the partner event source that the new event bus will be matched with.

+ #[serde(rename = "EventSourceName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_source_name: Option, + ///

The name of the new event bus.

The names of custom event buses can't contain the / character. You can't use the name default for a custom event bus because this name is already used for your account's default event bus.

If this is a partner event bus, the name must exactly match the name of the partner event source that this event bus is matched to. This name will include the / character.
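A hedged sketch of creating a custom bus with this request shape. The client and trait names (`CloudWatchEventsClient`, `CloudWatchEvents`) and the `create_event_bus` method name are assumptions based on the crate's conventions, since this hunk shows only the request and response structs:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, CreateEventBusRequest};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let resp = client
        .create_event_bus(CreateEventBusRequest {
            // Custom bus names can't contain '/' and can't be "default".
            name: "my-app-bus".to_owned(),
            event_source_name: None,
        })
        .sync()?;
    println!("new bus ARN: {:?}", resp.event_bus_arn);
    Ok(())
}
```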

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateEventBusResponse { + ///

The ARN of the new event bus.

+ #[serde(rename = "EventBusArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_arn: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreatePartnerEventSourceRequest { + ///

The AWS account ID of the customer who is permitted to create a matching partner event bus for this partner event source.

+ #[serde(rename = "Account")] + pub account: String, + ///

The name of the partner event source. This name must be unique and must be in the format partner_name/event_namespace/event_name. The AWS account that wants to use this partner event source must create a partner event bus with a name that matches the name of the partner event source.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreatePartnerEventSourceResponse { + ///

The ARN of the partner event source.

+ #[serde(rename = "EventSourceArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_source_arn: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeactivateEventSourceRequest { + ///

The name of the partner event source to deactivate.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteEventBusRequest { + ///

The name of the event bus to delete.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeletePartnerEventSourceRequest { + ///

The AWS account ID of the AWS customer that the event source was created for.

+ #[serde(rename = "Account")] + pub account: String, + ///

The name of the event source to delete.

+ #[serde(rename = "Name")] + pub name: String, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteRuleRequest { + ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

#[serde(rename = "Force")] #[serde(skip_serializing_if = "Option::is_none")] @@ -103,10 +176,15 @@ pub struct DeleteRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Serialize)] -pub struct DescribeEventBusRequest {} +pub struct DescribeEventBusRequest { + ///

The name of the event bus to show details for. If you omit this, the default event bus is displayed.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventBusResponse { ///

The Amazon Resource Name (ARN) of the account permitted to write events to the current account.

#[serde(rename = "Arn")] @@ -122,15 +200,75 @@ pub struct DescribeEventBusResponse { pub policy: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeEventSourceRequest { + ///

The name of the partner event source to display the details of.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeEventSourceResponse { + ///

The ARN of the partner event source.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The name of the SaaS partner that created the event source.

+ #[serde(rename = "CreatedBy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_by: Option, + ///

The date and time that the event source was created.

+ #[serde(rename = "CreationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_time: Option, + ///

The date and time that the event source will expire if you don't create a matching event bus.

+ #[serde(rename = "ExpirationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub expiration_time: Option, + ///

The name of the partner event source.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The state of the event source. If it's ACTIVE, you have already created a matching event bus for this event source, and that event bus is active. If it's PENDING, either you haven't yet created a matching event bus, or that event bus is deactivated. If it's DELETED, you have created a matching event bus, but the event source has since been deleted.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribePartnerEventSourceRequest { + ///

The name of the event source to display.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribePartnerEventSourceResponse { + ///

The ARN of the event source.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The name of the event source.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeRuleRequest { + ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The name of the rule.

#[serde(rename = "Name")] pub name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRuleResponse { ///

The Amazon Resource Name (ARN) of the rule.

#[serde(rename = "Arn")] @@ -140,7 +278,11 @@ pub struct DescribeRuleResponse { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide.

+ ///

The event bus associated with the rule.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

The event pattern. For more information, see Event Patterns in the Amazon EventBridge User Guide.

#[serde(rename = "EventPattern")] #[serde(skip_serializing_if = "Option::is_none")] pub event_pattern: Option, @@ -156,7 +298,7 @@ pub struct DescribeRuleResponse { #[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)".

+ ///

The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)".

#[serde(rename = "ScheduleExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub schedule_expression: Option, @@ -168,6 +310,10 @@ pub struct DescribeRuleResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DisableRuleRequest { + ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The name of the rule.

#[serde(rename = "Name")] pub name: String, @@ -184,7 +330,7 @@ pub struct EcsParameters { #[serde(rename = "LaunchType")] #[serde(skip_serializing_if = "Option::is_none")] pub launch_type: Option, - ///

Use this structure if the ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

+ ///

Use this structure if the ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task doesn't use the awsvpc network mode, the task fails.

#[serde(rename = "NetworkConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub network_configuration: Option, @@ -203,24 +349,76 @@ pub struct EcsParameters { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct EnableRuleRequest { + ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The name of the rule.

#[serde(rename = "Name")] pub name: String, } +///

An event bus receives events from a source and routes them to rules associated with that event bus. Your account's default event bus receives events from AWS services. A custom event bus can receive events from AWS services as well as your custom applications and services. A partner event bus receives events from an event source created by a SaaS partner. These events come from the partner's services or applications.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EventBus { + ///

The ARN of the event bus.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The name of the event bus.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The permissions policy of the event bus, describing which other AWS accounts can write events to this event bus.

+ #[serde(rename = "Policy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub policy: Option, +} + +///

A partner event source is created by a SaaS partner. If a customer creates a partner event bus that matches this event source, that AWS account can receive events from the partner's applications or services.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EventSource { + ///

The ARN of the event source.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The name of the partner that created the event source.

+ #[serde(rename = "CreatedBy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_by: Option, + ///

The date and time when the event source was created.

+ #[serde(rename = "CreationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_time: Option, + ///

The date and time when the event source will expire if the AWS account doesn't create a matching event bus for it.

+ #[serde(rename = "ExpirationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub expiration_time: Option, + ///

The name of the event source.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The state of the event source. If it's ACTIVE, you have already created a matching event bus for this event source, and that event bus is active. If it's PENDING, either you haven't yet created a matching event bus, or that event bus is deactivated. If it's DELETED, you have created a matching event bus, but the event source has since been deleted.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + ///

Contains the parameters needed for you to provide custom input to a target based on one or more pieces of data extracted from the event.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct InputTransformer { - ///

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 10 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with "AWS."

+ ///

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output to be sent to the target.

InputPathsMap is an array of key-value pairs, where each value is a valid JSON path. You can have as many as 10 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys can't start with "AWS".

#[serde(rename = "InputPathsMap")] #[serde(skip_serializing_if = "Option::is_none")] pub input_paths_map: Option<::std::collections::HashMap>, - ///

Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.

If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:

  • The placeholder cannot be used as an object key.

  • Object values cannot include quote marks.

The following example shows the syntax for using InputPathsMap and InputTemplate.

"InputTransformer":

{

"InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},

"InputTemplate": "<instance> is in state <status>"

}

To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:

"InputTransformer":

{

"InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},

"InputTemplate": "<instance> is in state \"<status>\""

}

+ ///

Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value>. The InputTemplate must be valid JSON.

If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:

  • The placeholder can't be used as an object key

  • Object values can't include quote marks

The following example shows the syntax for using InputPathsMap and InputTemplate.

"InputTransformer":

{

"InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},

"InputTemplate": "<instance> is in state <status>"

}

To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:

"InputTransformer":

{

"InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},

"InputTemplate": "<instance> is in state \"<status>\""

}
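Building the transformer from the example above with this struct's fields (`input_paths_map`, `input_template`), a minimal sketch using only the standard-library `HashMap`:

```rust
use rusoto_events::InputTransformer;
use std::collections::HashMap;

fn build_transformer() -> InputTransformer {
    // JSON paths extracted from the event, keyed by placeholder name.
    let mut paths = HashMap::new();
    paths.insert("instance".to_owned(), "$.detail.instance".to_owned());
    paths.insert("status".to_owned(), "$.detail.status".to_owned());

    InputTransformer {
        input_paths_map: Some(paths),
        // Placeholders in angle brackets are filled from the map above.
        input_template: "<instance> is in state <status>".to_owned(),
    }
}
```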

#[serde(rename = "InputTemplate")] pub input_template: String, } -///

This object enables you to specify a JSON path to extract from the event and use as the partition key for the Amazon Kinesis data stream, so that you can control the shard to which the event goes. If you do not include this parameter, the default is to use the eventId as the partition key.

+///

This object enables you to specify a JSON path to extract from the event and use as the partition key for the Amazon Kinesis data stream so that you can control the shard that the event goes to. If you don't include this parameter, the default is to use the eventId as the partition key.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct KinesisParameters { ///

The JSON path to be extracted from the event and used as the partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide.

@@ -228,8 +426,126 @@ pub struct KinesisParameters { pub partition_key_path: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListEventBusesRequest { + ///

Specifying this limits the number of results returned by this operation. The operation also returns a NextToken that you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

Specifying this limits the results to only those event buses with names that start with the specified prefix.

+ #[serde(rename = "NamePrefix")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name_prefix: Option, + ///

The token returned by a previous call to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListEventBusesResponse { + ///

The list of event buses.

+ #[serde(rename = "EventBuses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_buses: Option>, + ///

A token you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListEventSourcesRequest { + ///

Specifying this limits the number of results returned by this operation. The operation also returns a NextToken that you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

Specifying this limits the results to only those partner event sources with names that start with the specified prefix.

+ #[serde(rename = "NamePrefix")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name_prefix: Option, + ///

The token returned by a previous call to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListEventSourcesResponse { + ///

The list of event sources.

+ #[serde(rename = "EventSources")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_sources: Option>, + ///

A token you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListPartnerEventSourceAccountsRequest { + ///

The name of the partner event source to display account information about.

+ #[serde(rename = "EventSourceName")] + pub event_source_name: String, + ///

Specifying this limits the number of results returned by this operation. The operation also returns a NextToken that you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

The token returned by a previous call to this operation. Specifying this retrieves the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListPartnerEventSourceAccountsResponse { + ///

A token you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of partner event sources returned by the operation.

+ #[serde(rename = "PartnerEventSourceAccounts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub partner_event_source_accounts: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListPartnerEventSourcesRequest { + ///

Specifying this limits the number of results returned by this operation. The operation also returns a NextToken that you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + ///

If you specify this, the results are limited to only those partner event sources that start with the string you specify.

+ #[serde(rename = "NamePrefix")] + pub name_prefix: String, + ///

The token returned by a previous call to this operation. Specifying this retrieves the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListPartnerEventSourcesResponse { + ///

A token you can use in a subsequent operation to retrieve the next set of results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The list of partner event sources returned by the operation.

+ #[serde(rename = "PartnerEventSources")] + #[serde(skip_serializing_if = "Option::is_none")] + pub partner_event_sources: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListRuleNamesByTargetRequest { + ///

Limits the results to show only the rules associated with the specified event bus.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The maximum number of results to return.

#[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -244,7 +560,7 @@ pub struct ListRuleNamesByTargetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRuleNamesByTargetResponse { ///

Indicates whether there are additional results to retrieve. If there are no more results, the value is null.

#[serde(rename = "NextToken")] @@ -258,6 +574,10 @@ pub struct ListRuleNamesByTargetResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListRulesRequest { + ///

Limits the results to show only the rules associated with the specified event bus.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The maximum number of results to return.

#[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -273,7 +593,7 @@ pub struct ListRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRulesResponse { ///

Indicates whether there are additional results to retrieve. If there are no more results, the value is null.

#[serde(rename = "NextToken")] @@ -287,15 +607,15 @@ pub struct ListRulesResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListTagsForResourceRequest { - ///

The ARN of the CloudWatch Events rule for which you want to view tags.

+ ///

The ARN of the rule for which you want to view tags.

#[serde(rename = "ResourceARN")] pub resource_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { - ///

The list of tag keys and values associated with the rule you specified

+ ///

The list of tag keys and values associated with the rule that you specified.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -303,6 +623,10 @@ pub struct ListTagsForResourceResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListTargetsByRuleRequest { + ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The maximum number of results to return.

#[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -317,7 +641,7 @@ pub struct ListTargetsByRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTargetsByRuleResponse { ///

Indicates whether there are additional results to retrieve. If there are no more results, the value is null.

#[serde(rename = "NextToken")] @@ -332,12 +656,48 @@ pub struct ListTargetsByRuleResponse { ///

This structure specifies the network configuration for an ECS task.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NetworkConfiguration { - ///

Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

+ ///

Use this structure to specify the VPC subnets and security groups for the task and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

#[serde(rename = "awsvpcConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub awsvpc_configuration: Option, } +///

A partner event source is created by a SaaS partner. If a customer creates a partner event bus that matches this event source, that AWS account can receive events from the partner's applications or services.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PartnerEventSource { + ///

The ARN of the partner event source.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The name of the partner event source.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +///

The AWS account that a partner event source has been offered to.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PartnerEventSourceAccount { + ///

The AWS account ID that the partner event source was offered to.

+ #[serde(rename = "Account")] + #[serde(skip_serializing_if = "Option::is_none")] + pub account: Option, + ///

The date and time when the event source was created.

+ #[serde(rename = "CreationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_time: Option, + ///

The date and time when the event source will expire if the AWS account doesn't create a matching event bus for it.

+ #[serde(rename = "ExpirationTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub expiration_time: Option, + ///

The state of the event source. If it's ACTIVE, you have already created a matching event bus for this event source, and that event bus is active. If it's PENDING, either you haven't yet created a matching event bus, or that event bus is deactivated. If it's DELETED, you have created a matching event bus, but the event source has since been deleted.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutEventsRequest { ///

The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.

@@ -348,15 +708,19 @@ pub struct PutEventsRequest { ///

Represents an event to be submitted.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutEventsRequestEntry { - ///

A valid JSON string. There is no other schema imposed. The JSON string may contain fields and nested subobjects.

+ ///

A valid JSON string. There is no other schema imposed. The JSON string can contain fields and nested subobjects.

#[serde(rename = "Detail")] #[serde(skip_serializing_if = "Option::is_none")] pub detail: Option, - ///

Free-form string used to decide what fields to expect in the event detail.

+ ///

Free-form string used to decide which fields to expect in the event detail.

#[serde(rename = "DetailType")] #[serde(skip_serializing_if = "Option::is_none")] pub detail_type: Option, - ///

AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

+ ///

The event bus that will receive the event. Only the rules that are associated with this event bus can match the event.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

AWS resources, identified by Amazon Resource Name (ARN), that the event primarily concerns. Any number, including zero, can be present.

#[serde(rename = "Resources")] #[serde(skip_serializing_if = "Option::is_none")] pub resources: Option>, @@ -364,14 +728,14 @@ pub struct PutEventsRequestEntry { #[serde(rename = "Source")] #[serde(skip_serializing_if = "Option::is_none")] pub source: Option, - ///

The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used.

+ ///

The timestamp of the event, per RFC3339. If no timestamp is provided, the timestamp of the PutEvents call is used.

#[serde(rename = "Time")] #[serde(skip_serializing_if = "Option::is_none")] pub time: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEventsResponse { ///

The successfully and unsuccessfully ingested events results. If the ingestion was successful, the entry has the event ID in it. Otherwise, you can use the error code and error message to identify the problem with the entry.

#[serde(rename = "Entries")] @@ -385,7 +749,7 @@ pub struct PutEventsResponse { ///

Represents an event that failed to be submitted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEventsResultEntry { ///

The error code that indicates why the event submission failed.

#[serde(rename = "ErrorCode")] @@ -401,19 +765,86 @@ pub struct PutEventsResultEntry { pub event_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutPartnerEventsRequest { + ///

The list of events to write to the event bus.

+ #[serde(rename = "Entries")] + pub entries: Vec, +} + +///

The details about an event generated by a SaaS partner.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutPartnerEventsRequestEntry { + ///

A valid JSON string. There is no other schema imposed. The JSON string can contain fields and nested subobjects.

+ #[serde(rename = "Detail")] + #[serde(skip_serializing_if = "Option::is_none")] + pub detail: Option, + ///

A free-form string used to decide which fields to expect in the event detail.

+ #[serde(rename = "DetailType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub detail_type: Option, + ///

AWS resources, identified by Amazon Resource Name (ARN), that the event primarily concerns. Any number, including zero, can be present.

+ #[serde(rename = "Resources")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resources: Option>, + ///

The event source that is generating the entry.

+ #[serde(rename = "Source")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, + ///

The date and time of the event.

+ #[serde(rename = "Time")] + #[serde(skip_serializing_if = "Option::is_none")] + pub time: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutPartnerEventsResponse { + ///

The list of events from this operation that were successfully written to the partner event bus.

+ #[serde(rename = "Entries")] + #[serde(skip_serializing_if = "Option::is_none")] + pub entries: Option>, + ///

The number of events from this operation that couldn't be written to the partner event bus.

+ #[serde(rename = "FailedEntryCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub failed_entry_count: Option, +} + +///

Represents an event that a partner tried to generate but failed.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutPartnerEventsResultEntry { + ///

The error code that indicates why the event submission failed.

+ #[serde(rename = "ErrorCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_code: Option, + ///

The error message that explains why the event submission failed.

+ #[serde(rename = "ErrorMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, + ///

The ID of the event.

+ #[serde(rename = "EventId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutPermissionRequest { - ///

The action that you are enabling the other account to perform. Currently, this must be events:PutEvents.

+ ///

The action that you're enabling the other account to perform. Currently, this must be events:PutEvents.

#[serde(rename = "Action")] pub action: String, - ///

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. For more information about AWS Organizations, see What Is AWS Organizations in the AWS Organizations User Guide.

If you specify Condition with an AWS organization ID, and specify "*" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string which must contain Type, Key, and Value fields.

+ ///

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. For more information about AWS Organizations, see What Is AWS Organizations? in the AWS Organizations User Guide.

If you specify Condition with an AWS organization ID and specify "*" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string that must contain Type, Key, and Value fields.

#[serde(rename = "Condition")] #[serde(skip_serializing_if = "Option::is_none")] pub condition: Option, - ///

The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify "*" to permit any account to put events to your default event bus.

If you specify "*" without specifying Condition, avoid creating rules that may match undesirable events. To create more secure rules, make sure that the event pattern for each rule contains an account field with a specific account ID from which to receive events. Rules with an account field do not match any events sent from other accounts.

+ ///

The event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify "*" to permit any account to put events to your default event bus.

If you specify "*" without specifying Condition, avoid creating rules that might match undesirable events. To create more secure rules, make sure that the event pattern for each rule contains an account field with a specific account ID to receive events from. Rules with an account field don't match any events sent from other accounts.

#[serde(rename = "Principal")] pub principal: String, - ///

An identifier string for the external account that you are granting permissions to. If you later want to revoke the permission for this external account, specify this StatementId when you run RemovePermission.

+ ///

An identifier string for the external account that you're granting permissions to. If you later want to revoke the permission for this external account, specify this StatementId when you run RemovePermission.

#[serde(rename = "StatementId")] pub statement_id: String, } @@ -424,18 +855,22 @@ pub struct PutRuleRequest { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide.

+ ///

The event bus to associate with this rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

The event pattern. For more information, see Event Patterns in the Amazon EventBridge User Guide.

#[serde(rename = "EventPattern")] #[serde(skip_serializing_if = "Option::is_none")] pub event_pattern: Option, - ///

The name of the rule that you are creating or updating.

+ ///

The name of the rule that you're creating or updating.

#[serde(rename = "Name")] pub name: String, ///

The Amazon Resource Name (ARN) of the IAM role associated with the rule.

#[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The scheduling expression. For example, "cron(0 20 * * ? *)" or "rate(5 minutes)".

+ ///

The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)".

#[serde(rename = "ScheduleExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub schedule_expression: Option, @@ -450,7 +885,7 @@ pub struct PutRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRuleResponse { ///

The Amazon Resource Name (ARN) of the rule.

#[serde(rename = "RuleArn")] @@ -460,6 +895,10 @@ pub struct PutRuleResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutTargetsRequest { + ///

The name of the event bus associated with the rule. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The name of the rule.

#[serde(rename = "Rule")] pub rule: String, @@ -469,7 +908,7 @@ pub struct PutTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutTargetsResponse { ///

The failed target entries.

#[serde(rename = "FailedEntries")] @@ -483,7 +922,7 @@ pub struct PutTargetsResponse { ///

Represents a target that failed to be added to a rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutTargetsResultEntry { ///

The error code that indicates why the target addition failed. If the value is ConcurrentModificationException, too many requests were made at the same time.

#[serde(rename = "ErrorCode")] @@ -501,6 +940,10 @@ pub struct PutTargetsResultEntry { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RemovePermissionRequest { + ///

The name of the event bus to revoke permissions for. If you omit this, the default event bus is used.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, ///

The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.

#[serde(rename = "StatementId")] pub statement_id: String, @@ -508,7 +951,11 @@ pub struct RemovePermissionRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RemoveTargetsRequest { - ///

If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to remove targets. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

+ ///

The name of the event bus associated with the rule.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

If this is a managed rule created by an AWS service on your behalf, you must specify Force as True to remove targets. This parameter is ignored for rules that aren't managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

#[serde(rename = "Force")] #[serde(skip_serializing_if = "Option::is_none")] pub force: Option, @@ -521,7 +968,7 @@ pub struct RemoveTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTargetsResponse { ///

The failed target entries.

#[serde(rename = "FailedEntries")] @@ -535,7 +982,7 @@ pub struct RemoveTargetsResponse { ///

Represents a target that failed to be removed from a rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTargetsResultEntry { ///

The error code that indicates why the target removal failed. If the value is ConcurrentModificationException, too many requests were made at the same time.

#[serde(rename = "ErrorCode")] @@ -551,9 +998,9 @@ pub struct RemoveTargetsResultEntry { pub target_id: Option, } -///

Contains information about a rule in Amazon CloudWatch Events.

+///

Contains information about a rule in Amazon EventBridge.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Rule { ///

The Amazon Resource Name (ARN) of the rule.

#[serde(rename = "Arn")] @@ -563,11 +1010,15 @@ pub struct Rule { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

The event pattern of the rule. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide.

+ ///

The event bus associated with the rule.

+ #[serde(rename = "EventBusName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub event_bus_name: Option, + ///

The event pattern of the rule. For more information, see Event Patterns in the Amazon EventBridge User Guide.

#[serde(rename = "EventPattern")] #[serde(skip_serializing_if = "Option::is_none")] pub event_pattern: Option, - ///

If the rule was created on behalf of your account by an AWS service, this field displays the principal name of the service that created the rule.

+ ///

If an AWS service created the rule on behalf of your account, this field displays the principal name of the service that created the rule.

#[serde(rename = "ManagedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub managed_by: Option, @@ -579,7 +1030,7 @@ pub struct Rule { #[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)".

+ ///

The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)".

#[serde(rename = "ScheduleExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub schedule_expression: Option, @@ -589,15 +1040,15 @@ pub struct Rule { pub state: Option, } -///

This parameter contains the criteria (either InstanceIds or a tag) used to specify which EC2 instances are to be sent the command.

+///

This parameter contains the criteria (either InstanceIds or a tag) used to specify which EC2 instances are to be sent the command.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RunCommandParameters { - ///

Currently, we support including only one RunCommandTarget block, which specifies either an array of InstanceIds or a tag.

+ ///

Currently, we support including only one RunCommandTarget block, which specifies either an array of InstanceIds or a tag.

#[serde(rename = "RunCommandTargets")] pub run_command_targets: Vec, } -///

Information about the EC2 instances that are to be sent the command, specified as key-value pairs. Each RunCommandTarget block can include only one key, but this key may specify multiple values.

+///

Information about the EC2 instances that are to be sent the command, specified as key-value pairs. Each RunCommandTarget block can include only one key, but this key can specify multiple values.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RunCommandTarget { ///

Can be either tag: tag-key or InstanceIds.

@@ -617,10 +1068,10 @@ pub struct SqsParameters { pub message_group_id: Option<String>, } -///

A key-value pair associated with an AWS resource. In CloudWatch Events, rules support tagging.

+///

A key-value pair associated with an AWS resource. In EventBridge, rules support tagging.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { - ///

A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.

+ ///

A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.

#[serde(rename = "Key")] pub key: String, ///

The value for the specified tag key.

@@ -630,7 +1081,7 @@ pub struct Tag { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TagResourceRequest { - ///

The ARN of the CloudWatch Events rule that you're adding tags to.

+ ///

The ARN of the rule that you're adding tags to.

#[serde(rename = "ResourceARN")] pub resource_arn: String, ///

The list of key-value pairs to associate with the rule.

@@ -639,10 +1090,10 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} -///
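A sketch of tagging a rule by ARN with the request struct above. The tags field is hidden in the hunk context and assumed to be Vec<Tag>; the ARN is a made-up example:

```rust
use rusoto_events::{Tag, TagResourceRequest};

fn tag_rule() -> TagResourceRequest {
    TagResourceRequest {
        resource_arn: "arn:aws:events:us-east-1:123456789012:rule/my-rule".to_owned(),
        tags: vec![Tag {
            key: "team".to_owned(),
            value: "platform".to_owned(),
        }],
    }
}
```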

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide.

+///

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you're setting the event bus of another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Target { ///

The Amazon Resource Name (ARN) of the target.

@@ -652,7 +1103,7 @@ pub struct Target { #[serde(rename = "BatchParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub batch_parameters: Option<BatchParameters>, - ///

Contains the Amazon ECS task definition and task count to be used, if the event target is an Amazon ECS task. For more information about Amazon ECS tasks, see Task Definitions in the Amazon EC2 Container Service Developer Guide.

+ ///

Contains the Amazon ECS task definition and task count to be used if the event target is an Amazon ECS task. For more information about Amazon ECS tasks, see Task Definitions in the Amazon EC2 Container Service Developer Guide.

#[serde(rename = "EcsParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub ecs_parameters: Option, @@ -671,7 +1122,7 @@ pub struct Target { #[serde(rename = "InputTransformer")] #[serde(skip_serializing_if = "Option::is_none")] pub input_transformer: Option, - ///

The custom parameter you can use to control the shard assignment, when the target is a Kinesis data stream. If you do not include this parameter, the default is to use the eventId as the partition key.

+ ///

The custom parameter that you can use to control the shard assignment when the target is a Kinesis data stream. If you don't include this parameter, the default is to use the eventId as the partition key.

#[serde(rename = "KinesisParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub kinesis_parameters: Option, @@ -694,13 +1145,13 @@ pub struct TestEventPatternRequest { ///

The event, in JSON format, to test against the event pattern.

#[serde(rename = "Event")] pub event: String, - ///

The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide.

+ ///

The event pattern. For more information, see Event Patterns in the Amazon EventBridge User Guide.

#[serde(rename = "EventPattern")] pub event_pattern: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestEventPatternResponse { ///

Indicates whether the event matches the event pattern.

#[serde(rename = "Result")] @@ -710,7 +1161,7 @@ pub struct TestEventPatternResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UntagResourceRequest { - ///

The ARN of the CloudWatch Events rule from which you are removing tags.

+ ///

The ARN of the rule that you're removing tags from.

#[serde(rename = "ResourceARN")] pub resource_arn: String, ///

The list of tag keys to remove from the resource.

@@ -719,32 +1170,34 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} -/// Errors returned by DeleteRule +/// Errors returned by ActivateEventSource #[derive(Debug, PartialEq)] -pub enum DeleteRuleError { - ///

There is concurrent modification on a rule or target.

- ConcurrentModification(String), +pub enum ActivateEventSourceError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

- ManagedRule(String), + ///

The specified state isn't a valid state for an event source.

+ InvalidState(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), } -impl DeleteRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl ActivateEventSourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "ConcurrentModificationException" => { - return RusotoError::Service(DeleteRuleError::ConcurrentModification(err.msg)) - } "InternalException" => { - return RusotoError::Service(DeleteRuleError::Internal(err.msg)) + return RusotoError::Service(ActivateEventSourceError::Internal(err.msg)) } - "ManagedRuleException" => { - return RusotoError::Service(DeleteRuleError::ManagedRule(err.msg)) + "InvalidStateException" => { + return RusotoError::Service(ActivateEventSourceError::InvalidState(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(ActivateEventSourceError::ResourceNotFound( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -753,38 +1206,62 @@ impl DeleteRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for DeleteRuleError { +impl fmt::Display for ActivateEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DeleteRuleError { +impl Error for ActivateEventSourceError { fn description(&self) -> &str { match *self { - DeleteRuleError::ConcurrentModification(ref cause) => cause, - DeleteRuleError::Internal(ref cause) => cause, - DeleteRuleError::ManagedRule(ref cause) => cause, + ActivateEventSourceError::Internal(ref cause) => cause, + ActivateEventSourceError::InvalidState(ref cause) => cause, + ActivateEventSourceError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by DescribeEventBus +/// Errors returned by CreateEventBus #[derive(Debug, PartialEq)] -pub enum DescribeEventBusError { +pub enum CreateEventBusError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

An entity that you specified does not exist.

+ ///

The specified state isn't a valid state for an event source.

+ InvalidState(String), + ///

You tried to create more resources than is allowed.

+ LimitExceeded(String), + ///

The resource that you're trying to create already exists.

+ ResourceAlreadyExists(String), + ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } -impl DescribeEventBusError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateEventBusError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(CreateEventBusError::ConcurrentModification( + err.msg, + )) + } "InternalException" => { - return RusotoError::Service(DescribeEventBusError::Internal(err.msg)) + return RusotoError::Service(CreateEventBusError::Internal(err.msg)) + } + "InvalidStateException" => { + return RusotoError::Service(CreateEventBusError::InvalidState(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(CreateEventBusError::LimitExceeded(err.msg)) + } + "ResourceAlreadyExistsException" => { + return RusotoError::Service(CreateEventBusError::ResourceAlreadyExists( + err.msg, + )) } "ResourceNotFoundException" => { - return RusotoError::Service(DescribeEventBusError::ResourceNotFound(err.msg)) + return RusotoError::Service(CreateEventBusError::ResourceNotFound(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -793,37 +1270,57 @@ impl DescribeEventBusError { return RusotoError::Unknown(res); } } -impl fmt::Display for DescribeEventBusError { +impl fmt::Display for CreateEventBusError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeEventBusError { +impl Error for CreateEventBusError { fn description(&self) -> &str { match *self { - DescribeEventBusError::Internal(ref cause) => cause, - DescribeEventBusError::ResourceNotFound(ref cause) => cause, + CreateEventBusError::ConcurrentModification(ref cause) => cause, + CreateEventBusError::Internal(ref cause) => cause, + CreateEventBusError::InvalidState(ref cause) => cause, + CreateEventBusError::LimitExceeded(ref cause) => cause, + CreateEventBusError::ResourceAlreadyExists(ref cause) => cause, + CreateEventBusError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by DescribeRule +/// Errors returned by CreatePartnerEventSource #[derive(Debug, PartialEq)] -pub enum DescribeRuleError { +pub enum CreatePartnerEventSourceError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

An entity that you specified does not exist.

- ResourceNotFound(String), + ///

You tried to create more resources than is allowed.

+ LimitExceeded(String), + ///

The resource that you're trying to create already exists.

+ ResourceAlreadyExists(String), } -impl DescribeRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreatePartnerEventSourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service( + CreatePartnerEventSourceError::ConcurrentModification(err.msg), + ) + } "InternalException" => { - return RusotoError::Service(DescribeRuleError::Internal(err.msg)) + return RusotoError::Service(CreatePartnerEventSourceError::Internal(err.msg)) } - "ResourceNotFoundException" => { - return RusotoError::Service(DescribeRuleError::ResourceNotFound(err.msg)) + "LimitExceededException" => { + return RusotoError::Service(CreatePartnerEventSourceError::LimitExceeded( + err.msg, + )) + } + "ResourceAlreadyExistsException" => { + return RusotoError::Service( + CreatePartnerEventSourceError::ResourceAlreadyExists(err.msg), + ) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -832,47 +1329,46 @@ impl DescribeRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for DescribeRuleError { +impl fmt::Display for CreatePartnerEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeRuleError { +impl Error for CreatePartnerEventSourceError { fn description(&self) -> &str { match *self { - DescribeRuleError::Internal(ref cause) => cause, - DescribeRuleError::ResourceNotFound(ref cause) => cause, + CreatePartnerEventSourceError::ConcurrentModification(ref cause) => cause, + CreatePartnerEventSourceError::Internal(ref cause) => cause, + CreatePartnerEventSourceError::LimitExceeded(ref cause) => cause, + CreatePartnerEventSourceError::ResourceAlreadyExists(ref cause) => cause, } } } -/// Errors returned by DisableRule +/// Errors returned by DeactivateEventSource #[derive(Debug, PartialEq)] -pub enum DisableRuleError { - ///

There is concurrent modification on a rule or target.

- ConcurrentModification(String), +pub enum DeactivateEventSourceError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

- ManagedRule(String), - ///

An entity that you specified does not exist.

+ ///

The specified state isn't a valid state for an event source.

+ InvalidState(String), + ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } -impl DisableRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeactivateEventSourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "ConcurrentModificationException" => { - return RusotoError::Service(DisableRuleError::ConcurrentModification(err.msg)) - } "InternalException" => { - return RusotoError::Service(DisableRuleError::Internal(err.msg)) + return RusotoError::Service(DeactivateEventSourceError::Internal(err.msg)) } - "ManagedRuleException" => { - return RusotoError::Service(DisableRuleError::ManagedRule(err.msg)) + "InvalidStateException" => { + return RusotoError::Service(DeactivateEventSourceError::InvalidState(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(DisableRuleError::ResourceNotFound(err.msg)) + return RusotoError::Service(DeactivateEventSourceError::ResourceNotFound( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -881,49 +1377,33 @@ impl DisableRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for DisableRuleError { +impl fmt::Display for DeactivateEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DisableRuleError { +impl Error for DeactivateEventSourceError { fn description(&self) -> &str { match *self { - DisableRuleError::ConcurrentModification(ref cause) => cause, - DisableRuleError::Internal(ref cause) => cause, - DisableRuleError::ManagedRule(ref cause) => cause, - DisableRuleError::ResourceNotFound(ref cause) => cause, + DeactivateEventSourceError::Internal(ref cause) => cause, + DeactivateEventSourceError::InvalidState(ref cause) => cause, + DeactivateEventSourceError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by EnableRule +/// Errors returned by DeleteEventBus #[derive(Debug, PartialEq)] -pub enum EnableRuleError { - ///

There is concurrent modification on a rule or target.

- ConcurrentModification(String), +pub enum DeleteEventBusError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

- ManagedRule(String), - ///

An entity that you specified does not exist.

- ResourceNotFound(String), } -impl EnableRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteEventBusError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "ConcurrentModificationException" => { - return RusotoError::Service(EnableRuleError::ConcurrentModification(err.msg)) - } "InternalException" => { - return RusotoError::Service(EnableRuleError::Internal(err.msg)) - } - "ManagedRuleException" => { - return RusotoError::Service(EnableRuleError::ManagedRule(err.msg)) - } - "ResourceNotFoundException" => { - return RusotoError::Service(EnableRuleError::ResourceNotFound(err.msg)) + return RusotoError::Service(DeleteEventBusError::Internal(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -932,34 +1412,31 @@ impl EnableRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for EnableRuleError { +impl fmt::Display for DeleteEventBusError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for EnableRuleError { +impl Error for DeleteEventBusError { fn description(&self) -> &str { match *self { - EnableRuleError::ConcurrentModification(ref cause) => cause, - EnableRuleError::Internal(ref cause) => cause, - EnableRuleError::ManagedRule(ref cause) => cause, - EnableRuleError::ResourceNotFound(ref cause) => cause, + DeleteEventBusError::Internal(ref cause) => cause, } } } -/// Errors returned by ListRuleNamesByTarget +/// Errors returned by DeletePartnerEventSource #[derive(Debug, PartialEq)] -pub enum ListRuleNamesByTargetError { +pub enum DeletePartnerEventSourceError { ///

This exception occurs due to unexpected causes.

Internal(String), } -impl ListRuleNamesByTargetError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeletePartnerEventSourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "InternalException" => { - return RusotoError::Service(ListRuleNamesByTargetError::Internal(err.msg)) + return RusotoError::Service(DeletePartnerEventSourceError::Internal(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -968,31 +1445,46 @@ impl ListRuleNamesByTargetError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListRuleNamesByTargetError { +impl fmt::Display for DeletePartnerEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListRuleNamesByTargetError { +impl Error for DeletePartnerEventSourceError { fn description(&self) -> &str { match *self { - ListRuleNamesByTargetError::Internal(ref cause) => cause, + DeletePartnerEventSourceError::Internal(ref cause) => cause, } } } -/// Errors returned by ListRules +/// Errors returned by DeleteRule #[derive(Debug, PartialEq)] -pub enum ListRulesError { +pub enum DeleteRuleError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), + ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ManagedRule(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), } -impl ListRulesError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DeleteRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(DeleteRuleError::ConcurrentModification(err.msg)) + } "InternalException" => { - return RusotoError::Service(ListRulesError::Internal(err.msg)) + return RusotoError::Service(DeleteRuleError::Internal(err.msg)) + } + "ManagedRuleException" => { + return RusotoError::Service(DeleteRuleError::ManagedRule(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DeleteRuleError::ResourceNotFound(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1001,38 +1493,39 @@ impl ListRulesError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListRulesError { +impl fmt::Display for DeleteRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListRulesError { +impl Error for DeleteRuleError { fn description(&self) -> &str { match *self { - ListRulesError::Internal(ref cause) => cause, + DeleteRuleError::ConcurrentModification(ref cause) => cause, + DeleteRuleError::Internal(ref cause) => cause, + DeleteRuleError::ManagedRule(ref cause) => cause, + DeleteRuleError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by ListTagsForResource +/// Errors returned by DescribeEventBus #[derive(Debug, PartialEq)] -pub enum ListTagsForResourceError { +pub enum DescribeEventBusError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } -impl ListTagsForResourceError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeEventBusError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "InternalException" => { - return RusotoError::Service(ListTagsForResourceError::Internal(err.msg)) + return RusotoError::Service(DescribeEventBusError::Internal(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(ListTagsForResourceError::ResourceNotFound( - err.msg, - )) + return RusotoError::Service(DescribeEventBusError::ResourceNotFound(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1041,37 +1534,39 @@ impl ListTagsForResourceError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListTagsForResourceError { +impl fmt::Display for DescribeEventBusError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListTagsForResourceError { +impl Error for DescribeEventBusError { fn description(&self) -> &str { match *self { - ListTagsForResourceError::Internal(ref cause) => cause, - ListTagsForResourceError::ResourceNotFound(ref cause) => cause, + DescribeEventBusError::Internal(ref cause) => cause, + DescribeEventBusError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by ListTargetsByRule +/// Errors returned by DescribeEventSource #[derive(Debug, PartialEq)] -pub enum ListTargetsByRuleError { +pub enum DescribeEventSourceError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } -impl ListTargetsByRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeEventSourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "InternalException" => { - return RusotoError::Service(ListTargetsByRuleError::Internal(err.msg)) + return RusotoError::Service(DescribeEventSourceError::Internal(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(ListTargetsByRuleError::ResourceNotFound(err.msg)) + return RusotoError::Service(DescribeEventSourceError::ResourceNotFound( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1080,32 +1575,41 @@ impl ListTargetsByRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListTargetsByRuleError { +impl fmt::Display for DescribeEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListTargetsByRuleError { +impl Error for DescribeEventSourceError { fn description(&self) -> &str { match *self { - ListTargetsByRuleError::Internal(ref cause) => cause, - ListTargetsByRuleError::ResourceNotFound(ref cause) => cause, + DescribeEventSourceError::Internal(ref cause) => cause, + DescribeEventSourceError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by PutEvents +/// Errors returned by DescribePartnerEventSource #[derive(Debug, PartialEq)] -pub enum PutEventsError { +pub enum DescribePartnerEventSourceError { ///

This exception occurs due to unexpected causes.

Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), } -impl PutEventsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribePartnerEventSourceError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "InternalException" => { - return RusotoError::Service(PutEventsError::Internal(err.msg)) + return RusotoError::Service(DescribePartnerEventSourceError::Internal(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DescribePartnerEventSourceError::ResourceNotFound( + err.msg, + )) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1114,48 +1618,37 @@ impl PutEventsError { return RusotoError::Unknown(res); } } -impl fmt::Display for PutEventsError { +impl fmt::Display for DescribePartnerEventSourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for PutEventsError { +impl Error for DescribePartnerEventSourceError { fn description(&self) -> &str { match *self { - PutEventsError::Internal(ref cause) => cause, + DescribePartnerEventSourceError::Internal(ref cause) => cause, + DescribePartnerEventSourceError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by PutPermission +/// Errors returned by DescribeRule #[derive(Debug, PartialEq)] -pub enum PutPermissionError { - ///

There is concurrent modification on a rule or target.

- ConcurrentModification(String), +pub enum DescribeRuleError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

The event bus policy is too long. For more information, see the limits.

- PolicyLengthExceeded(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } -impl PutPermissionError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "ConcurrentModificationException" => { - return RusotoError::Service(PutPermissionError::ConcurrentModification( - err.msg, - )) - } "InternalException" => { - return RusotoError::Service(PutPermissionError::Internal(err.msg)) - } - "PolicyLengthExceededException" => { - return RusotoError::Service(PutPermissionError::PolicyLengthExceeded(err.msg)) + return RusotoError::Service(DescribeRuleError::Internal(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(PutPermissionError::ResourceNotFound(err.msg)) + return RusotoError::Service(DescribeRuleError::ResourceNotFound(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1164,54 +1657,47 @@ impl PutPermissionError { return RusotoError::Unknown(res); } } -impl fmt::Display for PutPermissionError { +impl fmt::Display for DescribeRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for PutPermissionError { +impl Error for DescribeRuleError { fn description(&self) -> &str { match *self { - PutPermissionError::ConcurrentModification(ref cause) => cause, - PutPermissionError::Internal(ref cause) => cause, - PutPermissionError::PolicyLengthExceeded(ref cause) => cause, - PutPermissionError::ResourceNotFound(ref cause) => cause, + DescribeRuleError::Internal(ref cause) => cause, + DescribeRuleError::ResourceNotFound(ref cause) => cause, } } } -/// Errors returned by PutRule +/// Errors returned by DisableRule #[derive(Debug, PartialEq)] -pub enum PutRuleError { - ///

There is concurrent modification on a rule or target.

+pub enum DisableRuleError { + ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

The event pattern is not valid.

- InvalidEventPattern(String), - ///

You tried to create more rules or add more targets to a rule than is allowed.

- LimitExceeded(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

ManagedRule(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), } -impl PutRuleError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DisableRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "ConcurrentModificationException" => { - return RusotoError::Service(PutRuleError::ConcurrentModification(err.msg)) + return RusotoError::Service(DisableRuleError::ConcurrentModification(err.msg)) } "InternalException" => { - return RusotoError::Service(PutRuleError::Internal(err.msg)) - } - "InvalidEventPatternException" => { - return RusotoError::Service(PutRuleError::InvalidEventPattern(err.msg)) - } - "LimitExceededException" => { - return RusotoError::Service(PutRuleError::LimitExceeded(err.msg)) + return RusotoError::Service(DisableRuleError::Internal(err.msg)) } "ManagedRuleException" => { - return RusotoError::Service(PutRuleError::ManagedRule(err.msg)) + return RusotoError::Service(DisableRuleError::ManagedRule(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DisableRuleError::ResourceNotFound(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -1220,7 +1706,542 @@ impl PutRuleError { return RusotoError::Unknown(res); } } -impl fmt::Display for PutRuleError { +impl fmt::Display for DisableRuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DisableRuleError { + fn description(&self) -> &str { + match *self { + DisableRuleError::ConcurrentModification(ref cause) => cause, + DisableRuleError::Internal(ref cause) => cause, + DisableRuleError::ManagedRule(ref cause) => cause, + DisableRuleError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by EnableRule +#[derive(Debug, PartialEq)] +pub enum EnableRuleError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ManagedRule(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl EnableRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(EnableRuleError::ConcurrentModification(err.msg)) + } + "InternalException" => { + return RusotoError::Service(EnableRuleError::Internal(err.msg)) + } + "ManagedRuleException" => { + return RusotoError::Service(EnableRuleError::ManagedRule(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(EnableRuleError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for EnableRuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for EnableRuleError { + fn description(&self) -> &str { + match *self { + EnableRuleError::ConcurrentModification(ref cause) => cause, + EnableRuleError::Internal(ref cause) => cause, + EnableRuleError::ManagedRule(ref cause) => cause, + EnableRuleError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by ListEventBuses +#[derive(Debug, PartialEq)] +pub enum ListEventBusesError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), +} + +impl ListEventBusesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListEventBusesError::Internal(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListEventBusesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListEventBusesError { + fn description(&self) -> &str { + match *self { + ListEventBusesError::Internal(ref cause) => cause, + } + } +} +/// Errors returned by ListEventSources +#[derive(Debug, PartialEq)] +pub enum ListEventSourcesError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), +} + +impl ListEventSourcesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListEventSourcesError::Internal(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListEventSourcesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListEventSourcesError { + fn description(&self) -> &str { + match *self { + ListEventSourcesError::Internal(ref cause) => cause, + } + } +} +/// Errors returned by ListPartnerEventSourceAccounts +#[derive(Debug, PartialEq)] +pub enum ListPartnerEventSourceAccountsError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl ListPartnerEventSourceAccountsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListPartnerEventSourceAccountsError::Internal( + err.msg, + )) + } + "ResourceNotFoundException" => { + return RusotoError::Service( + ListPartnerEventSourceAccountsError::ResourceNotFound(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListPartnerEventSourceAccountsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListPartnerEventSourceAccountsError { + fn description(&self) -> &str { + match *self { + ListPartnerEventSourceAccountsError::Internal(ref cause) => cause, + ListPartnerEventSourceAccountsError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by ListPartnerEventSources +#[derive(Debug, PartialEq)] +pub enum ListPartnerEventSourcesError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), +} + +impl ListPartnerEventSourcesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListPartnerEventSourcesError::Internal(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListPartnerEventSourcesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListPartnerEventSourcesError { + fn description(&self) -> &str { + match *self { + ListPartnerEventSourcesError::Internal(ref cause) => cause, + } + } +} +/// Errors returned by ListRuleNamesByTarget +#[derive(Debug, PartialEq)] +pub enum ListRuleNamesByTargetError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl ListRuleNamesByTargetError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListRuleNamesByTargetError::Internal(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(ListRuleNamesByTargetError::ResourceNotFound( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListRuleNamesByTargetError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListRuleNamesByTargetError { + fn description(&self) -> &str { + match *self { + ListRuleNamesByTargetError::Internal(ref cause) => cause, + ListRuleNamesByTargetError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by ListRules +#[derive(Debug, PartialEq)] +pub enum ListRulesError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl ListRulesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListRulesError::Internal(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(ListRulesError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListRulesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListRulesError { + fn description(&self) -> &str { + match *self { + ListRulesError::Internal(ref cause) => cause, + ListRulesError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListTagsForResourceError::Internal(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(ListTagsForResourceError::ResourceNotFound( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::Internal(ref cause) => cause, + ListTagsForResourceError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by ListTargetsByRule +#[derive(Debug, PartialEq)] +pub enum ListTargetsByRuleError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl ListTargetsByRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(ListTargetsByRuleError::Internal(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(ListTargetsByRuleError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTargetsByRuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTargetsByRuleError { + fn description(&self) -> &str { + match *self { + ListTargetsByRuleError::Internal(ref cause) => cause, + ListTargetsByRuleError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by PutEvents +#[derive(Debug, PartialEq)] +pub enum PutEventsError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), +} + +impl PutEventsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(PutEventsError::Internal(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutEventsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutEventsError { + fn description(&self) -> &str { + match *self { + PutEventsError::Internal(ref cause) => cause, + } + } +} +/// Errors returned by PutPartnerEvents +#[derive(Debug, PartialEq)] +pub enum PutPartnerEventsError { + ///

This exception occurs due to unexpected causes.

+ Internal(String), +} + +impl PutPartnerEventsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalException" => { + return RusotoError::Service(PutPartnerEventsError::Internal(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutPartnerEventsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutPartnerEventsError { + fn description(&self) -> &str { + match *self { + PutPartnerEventsError::Internal(ref cause) => cause, + } + } +} +/// Errors returned by PutPermission +#[derive(Debug, PartialEq)] +pub enum PutPermissionError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

The event bus policy is too long. For more information, see the limits.

+ PolicyLengthExceeded(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl PutPermissionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(PutPermissionError::ConcurrentModification( + err.msg, + )) + } + "InternalException" => { + return RusotoError::Service(PutPermissionError::Internal(err.msg)) + } + "PolicyLengthExceededException" => { + return RusotoError::Service(PutPermissionError::PolicyLengthExceeded(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(PutPermissionError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutPermissionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutPermissionError { + fn description(&self) -> &str { + match *self { + PutPermissionError::ConcurrentModification(ref cause) => cause, + PutPermissionError::Internal(ref cause) => cause, + PutPermissionError::PolicyLengthExceeded(ref cause) => cause, + PutPermissionError::ResourceNotFound(ref cause) => cause, + } + } +} +/// Errors returned by PutRule +#[derive(Debug, PartialEq)] +pub enum PutRuleError { + ///

There is concurrent modification on a resource.

+ ConcurrentModification(String), + ///

This exception occurs due to unexpected causes.

+ Internal(String), + ///

The event pattern isn't valid.

+ InvalidEventPattern(String), + ///

You tried to create more resources than are allowed.

+ LimitExceeded(String), + ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ManagedRule(String), + ///

An entity that you specified doesn't exist.

+ ResourceNotFound(String), +} + +impl PutRuleError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(PutRuleError::ConcurrentModification(err.msg)) + } + "InternalException" => { + return RusotoError::Service(PutRuleError::Internal(err.msg)) + } + "InvalidEventPatternException" => { + return RusotoError::Service(PutRuleError::InvalidEventPattern(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(PutRuleError::LimitExceeded(err.msg)) + } + "ManagedRuleException" => { + return RusotoError::Service(PutRuleError::ManagedRule(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(PutRuleError::ResourceNotFound(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutRuleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } @@ -1233,21 +2254,22 @@ impl Error for PutRuleError { PutRuleError::InvalidEventPattern(ref cause) => cause, PutRuleError::LimitExceeded(ref cause) => cause, PutRuleError::ManagedRule(ref cause) => cause, + PutRuleError::ResourceNotFound(ref cause) => cause, } } } /// Errors returned by PutTargets #[derive(Debug, PartialEq)] pub enum PutTargetsError { - ///

There is concurrent modification on a rule or target.

+ ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

You tried to create more rules or add more targets to a rule than is allowed.

+ ///

You tried to create more resources than are allowed.

LimitExceeded(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

ManagedRule(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } @@ -1296,11 +2318,11 @@ impl Error for PutTargetsError { /// Errors returned by RemovePermission #[derive(Debug, PartialEq)] pub enum RemovePermissionError { - ///

There is concurrent modification on a rule or target.

+ ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } @@ -1343,13 +2365,13 @@ impl Error for RemovePermissionError { /// Errors returned by RemoveTargets #[derive(Debug, PartialEq)] pub enum RemoveTargetsError { - ///

There is concurrent modification on a rule or target.

+ ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

ManagedRule(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } @@ -1396,13 +2418,13 @@ impl Error for RemoveTargetsError { /// Errors returned by TagResource #[derive(Debug, PartialEq)] pub enum TagResourceError { - ///

There is concurrent modification on a rule or target.

+ ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

ManagedRule(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } @@ -1449,7 +2471,7 @@ impl Error for TagResourceError { pub enum TestEventPatternError { ///

This exception occurs due to unexpected causes.

Internal(String), - ///

The event pattern is not valid.

+ ///

The event pattern isn't valid.

InvalidEventPattern(String), } @@ -1488,13 +2510,13 @@ impl Error for TestEventPatternError { /// Errors returned by UntagResource #[derive(Debug, PartialEq)] pub enum UntagResourceError { - ///

There is concurrent modification on a rule or target.

+ ///

There is concurrent modification on a resource.

ConcurrentModification(String), ///

This exception occurs due to unexpected causes.

Internal(String), - ///

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

+ ///

An AWS service created this rule on behalf of your account. That service manages it. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You can't modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

ManagedRule(String), - ///

An entity that you specified does not exist.

+ ///

An entity that you specified doesn't exist.

ResourceNotFound(String), } @@ -1538,39 +2560,114 @@ impl Error for UntagResourceError { } } } -/// Trait representing the capabilities of the Amazon CloudWatch Events API. Amazon CloudWatch Events clients implement this trait. -pub trait CloudWatchEvents { - ///

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

+/// Trait representing the capabilities of the Amazon EventBridge API. Amazon EventBridge clients implement this trait. +pub trait EventBridge { + ///

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

This operation is performed by AWS customers, not by SaaS partners.

+ fn activate_event_source( + &self, + input: ActivateEventSourceRequest, + ) -> RusotoFuture<(), ActivateEventSourceError>; + + ///

Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your own custom applications and services, or it can be a partner event bus which can be matched to a partner event source.

This operation is used by AWS customers, not by SaaS partners.
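// Editor's sketch (not part of the generated diff): creating a custom bus with
// this client. Assumes the CreateEventBusRequest generated in this file, whose
// only required field is `name`; the bus name is hypothetical.
fn example_create_event_bus() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = CreateEventBusRequest {
        name: "my-app-bus".to_string(),
        ..Default::default()
    };
    // `sync()` blocks on the returned RusotoFuture.
    match client.create_event_bus(req).sync() {
        Ok(resp) => println!("created bus: {:?}", resp.event_bus_arn),
        Err(e) => eprintln!("create_event_bus failed: {}", e),
    }
}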

+ fn create_event_bus( + &self, + input: CreateEventBusRequest, + ) -> RusotoFuture; + + ///

Called by a SaaS partner to create a partner event source.

This operation is not used by AWS customers.

Each partner event source can be used by one AWS account to create a matching partner event bus in that AWS account. A SaaS partner must create one partner event source for each AWS account that wants to receive those event types.

A partner event source creates events based on resources in the SaaS partner's service or application.

An AWS account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using AWS Events rules and targets.

Partner event source names follow this format:

aws.partner/partnername/eventnamespace/eventname

  • partnername is determined during partner registration and identifies the partner to AWS customers.

  • For eventnamespace, we recommend that partners use a string that identifies the AWS customer within the partner's system. This should not be the customer's AWS account ID.

  • eventname is determined by the partner, and should uniquely identify an event-generating resource within the partner system. This should help AWS customers decide whether to create an event bus to receive these events.

+ fn create_partner_event_source( + &self, + input: CreatePartnerEventSourceRequest, + ) -> RusotoFuture; + + ///

An AWS customer uses this operation to temporarily stop receiving events from the specified partner event source. The matching event bus isn't deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it's deleted.

To activate a deactivated partner event source, use ActivateEventSource.

+ fn deactivate_event_source( + &self, + input: DeactivateEventSourceRequest, + ) -> RusotoFuture<(), DeactivateEventSourceError>; + + ///

Deletes the specified custom event bus or partner event bus. All rules associated with this event bus are also deleted. You can't delete your account's default event bus.

This operation is performed by AWS customers, not by SaaS partners.

+ fn delete_event_bus( + &self, + input: DeleteEventBusRequest, + ) -> RusotoFuture<(), DeleteEventBusError>; + + ///

This operation is used by SaaS partners to delete a partner event source. AWS customers don't use this operation.

When you delete an event source, the status of the corresponding partner event bus in the AWS customer account becomes DELETED.

+ fn delete_partner_event_source( + &self, + input: DeletePartnerEventSourceRequest, + ) -> RusotoFuture<(), DeletePartnerEventSourceError>; + + ///

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you're sure that the other service isn't still using that rule.

fn delete_rule(&self, input: DeleteRuleRequest) -> RusotoFuture<(), DeleteRuleError>; - ///

Displays the external AWS accounts that are permitted to write events to your account using your account's event bus, and the associated policy. To enable your account to receive events from other accounts, use PutPermission.

- fn describe_event_bus(&self) -> RusotoFuture; + ///

Displays details about an event bus in your account. This can include the external AWS accounts that are permitted to write events to your default event bus, and the associated policy. For custom event buses and partner event buses, it displays the name, ARN, policy, state, and creation time.

To enable your account to receive events from other accounts on its default event bus, use PutPermission.

For more information about partner event buses, see CreateEventBus.
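// Editor's sketch (not part of the generated diff): reading back a bus and its
// resource policy. Assumes DescribeEventBusRequest's fields are all optional,
// as generated in this file.
fn example_describe_event_bus() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    // No name set: describes the account's default event bus.
    if let Ok(resp) = client
        .describe_event_bus(DescribeEventBusRequest::default())
        .sync()
    {
        println!("bus {:?} policy: {:?}", resp.name, resp.policy);
    }
}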

+ fn describe_event_bus( + &self, + input: DescribeEventBusRequest, + ) -> RusotoFuture; + + ///

This operation lists details about a partner event source that is shared with your account.

This operation is run by AWS customers, not by SaaS partners.

+ fn describe_event_source( + &self, + input: DescribeEventSourceRequest, + ) -> RusotoFuture; + + ///

A SaaS partner can use this operation to list details about a partner event source that they have created.

+ fn describe_partner_event_source( + &self, + input: DescribePartnerEventSourceRequest, + ) -> RusotoFuture; - ///

Describes the specified rule.

DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

+ ///

Describes the specified rule.

DescribeRule doesn't list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.
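// Editor's sketch (not part of the generated diff): looking up a rule and
// distinguishing "not found" from other failures via the DescribeRuleError
// enum defined earlier in this file. The rule name is hypothetical.
fn example_describe_rule() {
    use rusoto_core::{Region, RusotoError};

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = DescribeRuleRequest {
        name: "nightly-batch".to_string(),
        ..Default::default()
    };
    match client.describe_rule(req).sync() {
        Ok(resp) => println!(
            "state: {:?}, schedule: {:?}",
            resp.state, resp.schedule_expression
        ),
        Err(RusotoError::Service(DescribeRuleError::ResourceNotFound(msg))) => {
            eprintln!("no such rule: {}", msg)
        }
        Err(e) => eprintln!("describe_rule failed: {}", e),
    }
}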

fn describe_rule( &self, input: DescribeRuleRequest, ) -> RusotoFuture; - ///

Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Allow a short period of time for changes to take effect.

+ ///

Disables the specified rule. A disabled rule won't match any events and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Allow a short period of time for changes to take effect.
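// Editor's sketch (not part of the generated diff): disabling a rule. The
// operation returns no payload, so success is just `Ok(())`.
fn example_disable_rule() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = DisableRuleRequest {
        name: "nightly-batch".to_string(),
        ..Default::default()
    };
    if let Err(e) = client.disable_rule(req).sync() {
        eprintln!("disable_rule failed: {}", e);
    }
}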

fn disable_rule(&self, input: DisableRuleRequest) -> RusotoFuture<(), DisableRuleError>; - ///

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

+ ///

Enables the specified rule. If the rule doesn't exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

fn enable_rule(&self, input: EnableRuleRequest) -> RusotoFuture<(), EnableRuleError>; - ///

Lists the rules for the specified target. You can see which of the rules in Amazon CloudWatch Events can invoke a specific target in your account.

+ ///

Lists all the event buses in your account, including the default event bus, custom event buses, and partner event buses.

This operation is run by AWS customers, not by SaaS partners.
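// Editor's sketch (not part of the generated diff): listing buses. Assumes the
// optional `name_prefix`/`next_token` paging fields generated in this file.
fn example_list_event_buses() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    if let Ok(resp) = client
        .list_event_buses(ListEventBusesRequest::default())
        .sync()
    {
        for bus in resp.event_buses.unwrap_or_default() {
            println!("{:?} => {:?}", bus.name, bus.arn);
        }
        // resp.next_token, when Some, is passed back in to fetch the next page.
    }
}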

+ fn list_event_buses( + &self, + input: ListEventBusesRequest, + ) -> RusotoFuture; + + ///

You can use this to see all the partner event sources that have been shared with your AWS account. For more information about partner event sources, see CreateEventBus.

This operation is run by AWS customers, not by SaaS partners.

+ fn list_event_sources( + &self, + input: ListEventSourcesRequest, + ) -> RusotoFuture; + + ///

A SaaS partner can use this operation to display the AWS account ID that a particular partner event source name is associated with.

This operation is used by SaaS partners, not by AWS customers.

+ fn list_partner_event_source_accounts( + &self, + input: ListPartnerEventSourceAccountsRequest, + ) -> RusotoFuture; + + ///

A SaaS partner can use this operation to list all the partner event source names that they have created.

This operation is not used by AWS customers.

+ fn list_partner_event_sources( + &self, + input: ListPartnerEventSourcesRequest, + ) -> RusotoFuture; + + ///

Lists the rules for the specified target. You can see which rules can invoke a specific target in your account.

fn list_rule_names_by_target( &self, input: ListRuleNamesByTargetRequest, ) -> RusotoFuture; - ///

Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.

ListRules does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

+ ///

Lists your EventBridge rules. You can either list all the rules or provide a prefix to match to the rule names.

ListRules doesn't list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.
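// Editor's sketch (not part of the generated diff): listing rules by name
// prefix. Targets are not included; ListTargetsByRule returns those.
fn example_list_rules() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = ListRulesRequest {
        name_prefix: Some("nightly-".to_string()),
        ..Default::default()
    };
    if let Ok(resp) = client.list_rules(req).sync() {
        for rule in resp.rules.unwrap_or_default() {
            println!("{:?}", rule.name);
        }
    }
}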

fn list_rules( &self, input: ListRulesRequest, ) -> RusotoFuture; - ///

Displays the tags associated with a CloudWatch Events resource. In CloudWatch Events, rules can be tagged.

+ ///

Displays the tags associated with an EventBridge resource. In EventBridge, rules can be tagged.

fn list_tags_for_resource( &self, input: ListTagsForResourceRequest, @@ -1582,92 +2679,253 @@ pub trait CloudWatchEvents { input: ListTargetsByRuleRequest, ) -> RusotoFuture; - ///

Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.

+ ///

Sends custom events to EventBridge so that they can be matched to rules. These events can be from your custom applications and services.
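// Editor's sketch (not part of the generated diff): publishing one custom
// event. All entry fields are optional in the generated shape; `detail` must
// be a JSON string. The source/detail-type values are hypothetical.
fn example_put_events() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let entry = PutEventsRequestEntry {
        source: Some("com.example.orders".to_string()),
        detail_type: Some("OrderPlaced".to_string()),
        detail: Some(r#"{"orderId":"1234"}"#.to_string()),
        ..Default::default()
    };
    if let Ok(resp) = client
        .put_events(PutEventsRequest {
            entries: vec![entry],
        })
        .sync()
    {
        // Entries can fail individually; check the per-entry results.
        println!("failed entries: {:?}", resp.failed_entry_count);
    }
}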

fn put_events( &self, input: PutEventsRequest, ) -> RusotoFuture; - ///

Running PutPermission permits the specified AWS account or AWS organization to put events to your account's default event bus. CloudWatch Events rules in your account are triggered by these events arriving to your default event bus.

For another account to send events to your account, that external account must have a CloudWatch Events rule with your account's default event bus as a target.

To enable multiple AWS accounts to put events to your default event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as "*" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

- fn put_permission(&self, input: PutPermissionRequest) -> RusotoFuture<(), PutPermissionError>; + ///

This is used by SaaS partners to write events to a customer's partner event bus.

AWS customers do not use this operation. Instead, they can use PutEvents to write custom events from their own applications to an event bus.

+ fn put_partner_events( + &self, + input: PutPartnerEventsRequest, + ) -> RusotoFuture; + + ///

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Rules in your account are triggered by these events arriving at an event bus in your account.

For another account to send events to your account, that external account must have a rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to an event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as "*" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on an event bus can't exceed 10 KB in size.
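// Editor's sketch (not part of the generated diff): granting one external
// account PutEvents access to the default bus. Assumes `action`, `principal`,
// and `statement_id` are required string fields in the generated request; the
// account ID is a placeholder.
fn example_put_permission() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = PutPermissionRequest {
        action: "events:PutEvents".to_string(),
        principal: "111122223333".to_string(),
        statement_id: "allow-account-111122223333".to_string(),
        ..Default::default()
    };
    if let Err(e) = client.put_permission(req).sync() {
        eprintln!("put_permission failed: {}", e);
    }
}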

+ fn put_permission(&self, input: PutPermissionRequest) -> RusotoFuture<(), PutPermissionError>; + + ///

Creates or updates the specified rule. Rules are enabled by default or based on the value of the state parameter. You can disable a rule using DisableRule.

A single rule watches for events from a single event bus. Events generated by AWS services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.

If you're updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments aren't kept. Instead, they're replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event that you want to match.

In EventBridge, you could create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If you don't write the rule carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions don't refire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.

+ fn put_rule(&self, input: PutRuleRequest) -> RusotoFuture; + + ///

Adds the specified targets to the specified rule, or updates the targets if they're already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets in EventBridge:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same Region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event isn't charged. For more information, see Amazon EventBridge Pricing.

If you're setting an event bus in another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or an Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is nonzero in the response, and each entry in FailedEntries provides the ID of the failed target and the error code.

+ fn put_targets( + &self, + input: PutTargetsRequest, + ) -> RusotoFuture; + + ///

Revokes the permission of another AWS account to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

+ fn remove_permission( + &self, + input: RemovePermissionRequest, + ) -> RusotoFuture<(), RemovePermissionError>; + + ///

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

When you remove a target, if the associated rule triggers shortly afterward, removed targets might continue to be invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.
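// Editor's sketch (not part of the generated diff): removing a target by ID
// and checking for partial failures, mirroring the PutTargets sketch above.
fn example_remove_targets() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = RemoveTargetsRequest {
        rule: "nightly-batch".to_string(),
        ids: vec!["invoke-batch-fn".to_string()],
        ..Default::default()
    };
    if let Ok(resp) = client.remove_targets(req).sync() {
        if resp.failed_entry_count.unwrap_or(0) > 0 {
            eprintln!("some removals failed: {:?}", resp.failed_entries);
        }
    }
}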

+ fn remove_targets( + &self, + input: RemoveTargetsRequest, + ) -> RusotoFuture; + + ///

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a rule that already has tags. If you specify a new tag key for the rule, this tag is appended to the list of tags associated with the rule. If you specify a tag key that is already associated with the rule, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.
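// Editor's sketch (not part of the generated diff): tagging a rule by ARN.
// Assumes the generated Tag shape with required `key`/`value` strings; the
// ARN and tag values are placeholders.
fn example_tag_resource() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = TagResourceRequest {
        resource_arn: "arn:aws:events:us-east-1:111122223333:rule/nightly-batch".to_string(),
        tags: vec![Tag {
            key: "team".to_string(),
            value: "data-platform".to_string(),
        }],
    };
    if let Err(e) = client.tag_resource(req).sync() {
        eprintln!("tag_resource failed: {}", e);
    }
}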

+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture; + + ///

Tests whether the specified event pattern matches the provided event.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event that you want to match.
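// Editor's sketch (not part of the generated diff): checking a pattern
// against a sample event before putting it in a rule. Assumes `event` and
// `event_pattern` are required string fields, as generated; the event JSON
// is a hypothetical example with the standard top-level envelope fields.
fn example_test_event_pattern() {
    use rusoto_core::Region;

    let client = EventBridgeClient::new(Region::UsEast1);
    let req = TestEventPatternRequest {
        event_pattern: r#"{"source":["com.example.orders"]}"#.to_string(),
        event: r#"{"id":"1","account":"111122223333","source":"com.example.orders","time":"2019-07-11T00:00:00Z","region":"us-east-1","resources":[],"detail-type":"OrderPlaced","detail":{}}"#.to_string(),
    };
    match client.test_event_pattern(req).sync() {
        Ok(resp) => println!("matches: {:?}", resp.result),
        Err(e) => eprintln!("test_event_pattern failed: {}", e),
    }
}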

+ fn test_event_pattern( + &self, + input: TestEventPatternRequest, + ) -> RusotoFuture; + + ///

Removes one or more tags from the specified EventBridge resource. In EventBridge, rules can be tagged.

+ fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture; +} +/// A client for the Amazon EventBridge API. +#[derive(Clone)] +pub struct EventBridgeClient { + client: Client, + region: region::Region, +} + +impl EventBridgeClient { + /// Creates a client backed by the default tokio event loop. + /// + /// The client will use the default credentials provider and tls client. + pub fn new(region: region::Region) -> EventBridgeClient { + Self::new_with_client(Client::shared(), region) + } + + pub fn new_with( + request_dispatcher: D, + credentials_provider: P, + region: region::Region, + ) -> EventBridgeClient + where + P: ProvideAwsCredentials + Send + Sync + 'static, + P::Future: Send, + D: DispatchSignedRequest + Send + Sync + 'static, + D::Future: Send, + { + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), + region, + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> EventBridgeClient { + EventBridgeClient { client, region } + } +} + +impl EventBridge for EventBridgeClient { + ///

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

This operation is performed by AWS customers, not by SaaS partners.

+ fn activate_event_source( + &self, + input: ActivateEventSourceRequest, + ) -> RusotoFuture<(), ActivateEventSourceError> { + let mut request = SignedRequest::new("POST", "events", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSEvents.ActivateEventSource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(future::ok(::std::mem::drop(response))) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ActivateEventSourceError::from_response(response)) + }), + ) + } + }) + } + + ///

Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your own custom applications and services, or it can be a partner event bus which can be matched to a partner event source.

This operation is used by AWS customers, not by SaaS partners.

+ fn create_event_bus( + &self, + input: CreateEventBusRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "events", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSEvents.CreateEventBus"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(CreateEventBusError::from_response(response))), + ) + } + }) + } + + ///

Called by a SaaS partner to create a partner event source.

This operation is not used by AWS customers.

Each partner event source can be used by one AWS account to create a matching partner event bus in that AWS account. A SaaS partner must create one partner event source for each AWS account that wants to receive those event types.

A partner event source creates events based on resources in the SaaS partner's service or application.

An AWS account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using AWS Events rules and targets.

Partner event source names follow this format:

aws.partner/partnername/eventnamespace/eventname

  • partnername is determined during partner registration and identifies the partner to AWS customers.

  • For eventnamespace, we recommend that partners use a string that identifies the AWS customer within the partner's system. This should not be the customer's AWS account ID.

  • eventname is determined by the partner, and should uniquely identify an event-generating resource within the partner system. This should help AWS customers decide whether to create an event bus to receive these events.

+ fn create_partner_event_source( + &self, + input: CreatePartnerEventSourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "events", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSEvents.CreatePartnerEventSource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); - ///

Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.

If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

In CloudWatch Events, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.

-    fn put_rule(&self, input: PutRuleRequest) -> RusotoFuture<PutRuleResponse, PutRuleError>;
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreatePartnerEventSourceResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(CreatePartnerEventSourceError::from_response(response))
+                }))
+            }
+        })
+    }
-    ///

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for CloudWatch Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

-    fn put_targets(
+    ///

An AWS customer uses this operation to temporarily stop receiving events from the specified partner event source. The matching event bus isn't deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it's deleted.

To activate a deactivated partner event source, use ActivateEventSource.

+    fn deactivate_event_source(
         &self,
-        input: PutTargetsRequest,
-    ) -> RusotoFuture<PutTargetsResponse, PutTargetsError>;
+        input: DeactivateEventSourceRequest,
+    ) -> RusotoFuture<(), DeactivateEventSourceError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
-    ///

Revokes the permission of another AWS account to be able to put events to your default event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

-    fn remove_permission(
-        &self,
-        input: RemovePermissionRequest,
-    ) -> RusotoFuture<(), RemovePermissionError>;
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.DeactivateEventSource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
-    ///

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

After you remove a target, it might continue to be invoked for a short time when the associated rule triggers. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

-    fn remove_targets(
-        &self,
-        input: RemoveTargetsRequest,
-    ) -> RusotoFuture<RemoveTargetsResponse, RemoveTargetsError>;
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DeactivateEventSourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
-    ///

Assigns one or more tags (key-value pairs) to the specified CloudWatch Events resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In CloudWatch Events, rules can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a rule that already has tags. If you specify a new tag key for the rule, this tag is appended to the list of tags associated with the rule. If you specify a tag key that is already associated with the rule, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

-    fn tag_resource(
+    ///

Deletes the specified custom event bus or partner event bus. All rules associated with this event bus are also deleted. You can't delete your account's default event bus.

This operation is performed by AWS customers, not by SaaS partners.

+    fn delete_event_bus(
         &self,
-        input: TagResourceRequest,
-    ) -> RusotoFuture<TagResourceResponse, TagResourceError>;
+        input: DeleteEventBusRequest,
+    ) -> RusotoFuture<(), DeleteEventBusError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
-    ///

Tests whether the specified event pattern matches the provided event.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

-    fn test_event_pattern(
-        &self,
-        input: TestEventPatternRequest,
-    ) -> RusotoFuture<TestEventPatternResponse, TestEventPatternError>;
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.DeleteEventBus");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
-    ///

Removes one or more tags from the specified CloudWatch Events resource. In CloudWatch Events, rules can be tagged.

-    fn untag_resource(
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteEventBusError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

This operation is used by SaaS partners to delete a partner event source. AWS customers don't use this operation.

When you delete an event source, the status of the corresponding partner event bus in the AWS customer account becomes DELETED.

+    fn delete_partner_event_source(
         &self,
-        input: UntagResourceRequest,
-    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError>;
-}
-/// A client for the Amazon CloudWatch Events API.
-#[derive(Clone)]
-pub struct CloudWatchEventsClient {
-    client: Client,
-    region: region::Region,
-}
+        input: DeletePartnerEventSourceRequest,
+    ) -> RusotoFuture<(), DeletePartnerEventSourceError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
-impl CloudWatchEventsClient {
-    /// Creates a client backed by the default tokio event loop.
-    ///
-    /// The client will use the default credentials provider and tls client.
-    pub fn new(region: region::Region) -> CloudWatchEventsClient {
-        CloudWatchEventsClient {
-            client: Client::shared(),
-            region,
-        }
-    }
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.DeletePartnerEventSource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
-    pub fn new_with<P, D>(
-        request_dispatcher: D,
-        credentials_provider: P,
-        region: region::Region,
-    ) -> CloudWatchEventsClient
-    where
-        P: ProvideAwsCredentials + Send + Sync + 'static,
-        P::Future: Send,
-        D: DispatchSignedRequest + Send + Sync + 'static,
-        D::Future: Send,
-    {
-        CloudWatchEventsClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
-            region,
-        }
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DeletePartnerEventSourceError::from_response(response))
+                }))
+            }
+        })
     }
-}
-impl CloudWatchEvents for CloudWatchEventsClient {
-    ///

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

+ ///

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you're sure that the other service isn't still using that rule.

     fn delete_rule(&self, input: DeleteRuleRequest) -> RusotoFuture<(), DeleteRuleError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");
@@ -1690,13 +2948,17 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Displays the external AWS accounts that are permitted to write events to your account using your account's event bus, and the associated policy. To enable your account to receive events from other accounts, use PutPermission.

-    fn describe_event_bus(&self) -> RusotoFuture<DescribeEventBusResponse, DescribeEventBusError> {
+    ///

Displays details about an event bus in your account. This can include the external AWS accounts that are permitted to write events to your default event bus, and the associated policy. For custom event buses and partner event buses, it displays the name, ARN, policy, state, and creation time.

To enable your account to receive events from other accounts on its default event bus, use PutPermission.

For more information about partner event buses, see CreateEventBus.

+    fn describe_event_bus(
+        &self,
+        input: DescribeEventBusRequest,
+    ) -> RusotoFuture<DescribeEventBusResponse, DescribeEventBusError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");

         request.set_content_type("application/x-amz-json-1.1".to_owned());
         request.add_header("x-amz-target", "AWSEvents.DescribeEventBus");
-        request.set_payload(Some(bytes::Bytes::from_static(b"{}")));
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));

         self.client.sign_and_dispatch(request, |response| {
             if response.status.is_success() {
@@ -1715,7 +2977,61 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Describes the specified rule.

DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

+ ///

This operation lists details about a partner event source that is shared with your account.

This operation is run by AWS customers, not by SaaS partners.

+    fn describe_event_source(
+        &self,
+        input: DescribeEventSourceRequest,
+    ) -> RusotoFuture<DescribeEventSourceResponse, DescribeEventSourceError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.DescribeEventSource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeEventSourceResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DescribeEventSourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    ///

A SaaS partner can use this operation to list details about a partner event source that they have created.

AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

+    fn describe_partner_event_source(
+        &self,
+        input: DescribePartnerEventSourceRequest,
+    ) -> RusotoFuture<DescribePartnerEventSourceResponse, DescribePartnerEventSourceError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.DescribePartnerEventSource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribePartnerEventSourceResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribePartnerEventSourceError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Describes the specified rule.

DescribeRule doesn't list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

     fn describe_rule(
         &self,
         input: DescribeRuleRequest,
@@ -1744,7 +3060,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Allow a short period of time for changes to take effect.

+ ///

Disables the specified rule. A disabled rule won't match any events and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Allow a short period of time for changes to take effect.

     fn disable_rule(&self, input: DisableRuleRequest) -> RusotoFuture<(), DisableRuleError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");
@@ -1767,7 +3083,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

+ ///

Enables the specified rule. If the rule doesn't exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

     fn enable_rule(&self, input: EnableRuleRequest) -> RusotoFuture<(), EnableRuleError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");
@@ -1790,7 +3106,118 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Lists the rules for the specified target. You can see which of the rules in Amazon CloudWatch Events can invoke a specific target in your account.

+ ///

Lists all the event buses in your account, including the default event bus, custom event buses, and partner event buses.

This operation is run by AWS customers, not by SaaS partners.
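
A minimal pagination sketch over this operation, assuming the `next_token` and `event_buses` field names on the request and response shapes generated in this diff:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, ListEventBusesRequest};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListEventBusesRequest {
            next_token: next_token.clone(),
            ..Default::default()
        };
        let response = client
            .list_event_buses(request)
            .sync()
            .expect("list_event_buses failed");
        for bus in response.event_buses.unwrap_or_default() {
            println!("{:?}", bus.name);
        }
        // Keep paging until the service stops returning a token.
        next_token = response.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```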

+    fn list_event_buses(
+        &self,
+        input: ListEventBusesRequest,
+    ) -> RusotoFuture<ListEventBusesResponse, ListEventBusesError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.ListEventBuses");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListEventBusesResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListEventBusesError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

You can use this to see all the partner event sources that have been shared with your AWS account. For more information about partner event sources, see CreateEventBus.

This operation is run by AWS customers, not by SaaS partners.

+    fn list_event_sources(
+        &self,
+        input: ListEventSourcesRequest,
+    ) -> RusotoFuture<ListEventSourcesResponse, ListEventSourcesError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.ListEventSources");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListEventSourcesResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListEventSourcesError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

A SaaS partner can use this operation to display the AWS account ID that a particular partner event source name is associated with.

This operation is used by SaaS partners, not by AWS customers.

+    fn list_partner_event_source_accounts(
+        &self,
+        input: ListPartnerEventSourceAccountsRequest,
+    ) -> RusotoFuture<ListPartnerEventSourceAccountsResponse, ListPartnerEventSourceAccountsError>
+    {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.ListPartnerEventSourceAccounts");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListPartnerEventSourceAccountsResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(ListPartnerEventSourceAccountsError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

A SaaS partner can use this operation to list all the partner event source names that they have created.

This operation is not used by AWS customers.

+    fn list_partner_event_sources(
+        &self,
+        input: ListPartnerEventSourcesRequest,
+    ) -> RusotoFuture<ListPartnerEventSourcesResponse, ListPartnerEventSourcesError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.ListPartnerEventSources");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListPartnerEventSourcesResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(ListPartnerEventSourcesError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Lists the rules for the specified target. You can see which rules can invoke a specific target in your account.

     fn list_rule_names_by_target(
         &self,
         input: ListRuleNamesByTargetRequest,
@@ -1818,7 +3245,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.

ListRules does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

+ ///

Lists your EventBridge rules. You can either list all the rules or provide a prefix to match to the rule names.

ListRules doesn't list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

     fn list_rules(
         &self,
         input: ListRulesRequest,
@@ -1847,7 +3274,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Displays the tags associated with a CloudWatch Events resource. In CloudWatch Events, rules can be tagged.

+ ///

Displays the tags associated with an EventBridge resource. In EventBridge, rules can be tagged.

     fn list_tags_for_resource(
         &self,
         input: ListTagsForResourceRequest,
@@ -1904,7 +3331,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.

+ ///

Sends custom events to EventBridge so that they can be matched to rules. These events can be from your custom applications and services.
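
A sketch of sending one custom event, assuming the `PutEventsRequestEntry` field names (`source`, `detail_type`, `detail`) in the generated shapes; the source and detail values are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_events::{
    CloudWatchEvents, CloudWatchEventsClient, PutEventsRequest, PutEventsRequestEntry,
};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let entry = PutEventsRequestEntry {
        source: Some("com.example.myapp".to_owned()),
        detail_type: Some("order-created".to_owned()),
        detail: Some(r#"{"orderId": "12345"}"#.to_owned()),
        ..Default::default()
    };
    let request = PutEventsRequest {
        entries: vec![entry],
    };
    match client.put_events(request).sync() {
        // failed_entry_count reports entries the service rejected.
        Ok(response) => println!("failed entries: {:?}", response.failed_entry_count),
        Err(e) => eprintln!("put_events failed: {}", e),
    }
}
```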

     fn put_events(
         &self,
         input: PutEventsRequest,
@@ -1933,7 +3360,36 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Running PutPermission permits the specified AWS account or AWS organization to put events to your account's default event bus. CloudWatch Events rules in your account are triggered by these events arriving to your default event bus.

For another account to send events to your account, that external account must have a CloudWatch Events rule with your account's default event bus as a target.

To enable multiple AWS accounts to put events to your default event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as "*" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

+ ///

This is used by SaaS partners to write events to a customer's partner event bus.

AWS customers do not use this operation. Instead, AWS customers can use PutEvents to write custom events from their own applications to an event bus.

+    fn put_partner_events(
+        &self,
+        input: PutPartnerEventsRequest,
+    ) -> RusotoFuture<PutPartnerEventsResponse, PutPartnerEventsError> {
+        let mut request = SignedRequest::new("POST", "events", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSEvents.PutPartnerEvents");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<PutPartnerEventsResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(PutPartnerEventsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have a rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to an event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as "*" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on an event bus can't exceed 10 KB in size.
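
A sketch of the organization-wide grant described above, assuming the `Condition` struct renames the reserved word `type` to `type_`; the statement ID and organization ID are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, Condition, PutPermissionRequest};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let request = PutPermissionRequest {
        action: "events:PutEvents".to_owned(),
        // "*" plus an organization condition grants every account in the org.
        principal: "*".to_owned(),
        statement_id: "OrgAccess".to_owned(),
        condition: Some(Condition {
            type_: "StringEquals".to_owned(),
            key: "aws:PrincipalOrgID".to_owned(),
            value: "o-1234567890".to_owned(),
        }),
        ..Default::default()
    };
    client.put_permission(request).sync().expect("put_permission failed");
}
```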

     fn put_permission(&self, input: PutPermissionRequest) -> RusotoFuture<(), PutPermissionError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");
@@ -1956,7 +3412,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Creates or updates the specified rule. Rules are enabled by default, or based on the value of the state. You can disable a rule using DisableRule.

If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

In CloudWatch Events, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.

+ ///

Creates or updates the specified rule. Rules are enabled by default or based on the value of the state. You can disable a rule using DisableRule.

A single rule watches for events from a single event bus. Events generated by AWS services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.

If you're updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments aren't kept. Instead, they're replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event that you want to match.

In EventBridge, you could create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If you don't write the rule carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions don't refire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.
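
A sketch of a schedule-only rule per the description above (at least one of `schedule_expression` or `event_pattern` must be set); the rule name and schedule are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, PutRuleRequest};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let request = PutRuleRequest {
        name: "nightly-snapshot".to_owned(),
        // Self-triggers once a day; no event_pattern is needed for this rule.
        schedule_expression: Some("rate(1 day)".to_owned()),
        ..Default::default()
    };
    match client.put_rule(request).sync() {
        Ok(response) => println!("rule ARN: {:?}", response.rule_arn),
        Err(e) => eprintln!("put_rule failed: {}", e),
    }
}
```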

     fn put_rule(&self, input: PutRuleRequest) -> RusotoFuture<PutRuleResponse, PutRuleError> {
         let mut request = SignedRequest::new("POST", "events", &self.region, "/");
@@ -1981,7 +3437,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for CloudWatch Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

+ ///

Adds the specified targets to the specified rule, or updates the targets if they're already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets in EventBridge:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only on the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same Region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event isn't charged. For more information, see Amazon EventBridge Pricing.

If you're setting an event bus in another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is nonzero in the response, and each entry in FailedEntries provides the ID of the failed target and the error code.
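
A sketch that adds a Lambda target receiving only the event's detail section via `input_path`, then checks the partial-failure fields described above; the rule name and target ARN are placeholders:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, PutTargetsRequest, Target};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let target = Target {
        id: "my-lambda-target".to_owned(),
        arn: "arn:aws:lambda:us-east-1:123456789012:function:my-function".to_owned(),
        // JSON dot notation, as required when specifying InputPath.
        input_path: Some("$.detail".to_owned()),
        ..Default::default()
    };
    let request = PutTargetsRequest {
        rule: "nightly-snapshot".to_owned(),
        targets: vec![target],
        ..Default::default()
    };
    let response = client.put_targets(request).sync().expect("put_targets failed");
    if response.failed_entry_count.unwrap_or(0) > 0 {
        eprintln!("some targets failed: {:?}", response.failed_entries);
    }
}
```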

     fn put_targets(
         &self,
         input: PutTargetsRequest,
@@ -2010,7 +3466,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Revokes the permission of another AWS account to be able to put events to your default event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

+ ///

Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

     fn remove_permission(
         &self,
         input: RemovePermissionRequest,
@@ -2065,7 +3521,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Assigns one or more tags (key-value pairs) to the specified CloudWatch Events resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In CloudWatch Events, rules can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a rule that already has tags. If you specify a new tag key for the rule, this tag is appended to the list of tags associated with the rule. If you specify a tag key that is already associated with the rule, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

+ ///

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a rule that already has tags. If you specify a new tag key for the rule, this tag is appended to the list of tags associated with the rule. If you specify a tag key that is already associated with the rule, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.
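
A sketch of tagging a rule by ARN, assuming the `Tag { key, value }` shape generated here; the ARN and tag values are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, Tag, TagResourceRequest};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let request = TagResourceRequest {
        resource_arn: "arn:aws:events:us-east-1:123456789012:rule/nightly-snapshot".to_owned(),
        tags: vec![Tag {
            key: "team".to_owned(),
            value: "platform".to_owned(),
        }],
    };
    client.tag_resource(request).sync().expect("tag_resource failed");
}
```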

     fn tag_resource(
         &self,
         input: TagResourceRequest,
@@ -2094,7 +3550,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Tests whether the specified event pattern matches the provided event.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

+ ///

Tests whether the specified event pattern matches the provided event.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event that you want to match.
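
A sketch of validating a pattern against a sample event; both fields are JSON strings, and the event body below is a hypothetical minimal example:

```rust
use rusoto_core::Region;
use rusoto_events::{CloudWatchEvents, CloudWatchEventsClient, TestEventPatternRequest};

fn main() {
    let client = CloudWatchEventsClient::new(Region::UsEast1);
    let request = TestEventPatternRequest {
        event_pattern: r#"{"source": ["aws.ec2"]}"#.to_owned(),
        event: r#"{"id": "1", "account": "123456789012", "source": "aws.ec2",
                   "time": "2019-01-01T00:00:00Z", "region": "us-east-1",
                   "resources": [], "detail-type": "example", "detail": {}}"#
            .to_owned(),
    };
    match client.test_event_pattern(request).sync() {
        Ok(response) => println!("matches: {:?}", response.result),
        Err(e) => eprintln!("test_event_pattern failed: {}", e),
    }
}
```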

     fn test_event_pattern(
         &self,
         input: TestEventPatternRequest,
@@ -2123,7 +3579,7 @@ impl CloudWatchEvents for CloudWatchEventsClient {
         })
     }

-    ///

Removes one or more tags from the specified CloudWatch Events resource. In CloudWatch Events, rules can be tagged.

+ ///

Removes one or more tags from the specified EventBridge resource. In EventBridge, rules can be tagged.

     fn untag_resource(
         &self,
         input: UntagResourceRequest,
diff --git a/rusoto/services/events/src/lib.rs b/rusoto/services/events/src/lib.rs
index 11356fa7ace..f3808f88874 100644
--- a/rusoto/services/events/src/lib.rs
+++ b/rusoto/services/events/src/lib.rs
@@ -12,9 +12,9 @@
 // =================================================================

 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//!

Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon CloudWatch Events, see the Amazon CloudWatch Events User Guide.

+//!

Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

//!
-//! If you're using the service, you're probably looking for [CloudWatchEventsClient](struct.CloudWatchEventsClient.html) and [CloudWatchEvents](trait.CloudWatchEvents.html).
+//! If you're using the service, you're probably looking for [EventBridgeClient](struct.EventBridgeClient.html) and [EventBridge](trait.EventBridge.html).

 extern crate bytes;
 extern crate futures;
diff --git a/rusoto/services/firehose/Cargo.toml b/rusoto/services/firehose/Cargo.toml
index 214a94569bb..3b33ce80d02 100644
--- a/rusoto/services/firehose/Cargo.toml
+++ b/rusoto/services/firehose/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_firehose"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/firehose/README.md b/rusoto/services/firehose/README.md
index 8913c990545..d8829f03acd 100644
--- a/rusoto/services/firehose/README.md
+++ b/rusoto/services/firehose/README.md
@@ -23,9 +23,16 @@ To use `rusoto_firehose` in your application, add it as a dependency in your `Ca
 ```toml
 [dependencies]
-rusoto_firehose = "0.40.0"
+rusoto_firehose = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/firehose/src/custom/mod.rs b/rusoto/services/firehose/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/firehose/src/custom/mod.rs
+++ b/rusoto/services/firehose/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/firehose/src/generated.rs b/rusoto/services/firehose/src/generated.rs
index 42a76e34565..1c11485c1b8 100644
--- a/rusoto/services/firehose/src/generated.rs
+++ b/rusoto/services/firehose/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -106,7 +105,7 @@ pub struct CreateDeliveryStreamInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateDeliveryStreamOutput {
     ///

The ARN of the delivery stream.

#[serde(rename = "DeliveryStreamARN")] @@ -143,12 +142,12 @@ pub struct DeleteDeliveryStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDeliveryStreamOutput {} ///

Contains information about a delivery stream.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeliveryStreamDescription {
     ///

The date and time that the delivery stream was created.

#[serde(rename = "CreateTimestamp")] @@ -191,7 +190,7 @@ pub struct DeliveryStreamDescription { ///

Indicates the server-side encryption (SSE) status for the delivery stream.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeliveryStreamEncryptionConfiguration {
     ///

For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption.

#[serde(rename = "Status")] @@ -215,7 +214,7 @@ pub struct DescribeDeliveryStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDeliveryStreamOutput { ///

Information about the delivery stream.

#[serde(rename = "DeliveryStreamDescription")] @@ -237,7 +236,7 @@ pub struct Deserializer { ///

Describes the destination for a delivery stream.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DestinationDescription {
     ///

The ID of the destination.

#[serde(rename = "DestinationId")] @@ -323,7 +322,7 @@ pub struct ElasticsearchDestinationConfiguration { ///

The destination description in Amazon ES.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ElasticsearchDestinationDescription {
     ///

The buffering options.

#[serde(rename = "BufferingHints")] @@ -491,7 +490,7 @@ pub struct ExtendedS3DestinationConfiguration { ///

Describes a destination in Amazon S3.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ExtendedS3DestinationDescription {
     ///

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

#[serde(rename = "BucketARN")] @@ -630,7 +629,7 @@ pub struct KinesisStreamSourceConfiguration { ///

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct KinesisStreamSourceDescription {
     ///

Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

#[serde(rename = "DeliveryStartTimestamp")] @@ -663,7 +662,7 @@ pub struct ListDeliveryStreamsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeliveryStreamsOutput { ///

The names of the delivery streams.

#[serde(rename = "DeliveryStreamNames")] @@ -689,7 +688,7 @@ pub struct ListTagsForDeliveryStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForDeliveryStreamOutput { ///

If this is true in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey to the key of the last tag returned and call ListTagsForDeliveryStream again.
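
A pagination sketch for this flag, assuming the `KinesisFirehose`/`KinesisFirehoseClient` names exported by this crate and the field shapes shown in this diff; the stream name is hypothetical:

```rust
use rusoto_core::Region;
use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, ListTagsForDeliveryStreamInput};

fn main() {
    let client = KinesisFirehoseClient::new(Region::UsEast1);
    let mut start_key: Option<String> = None;
    loop {
        let input = ListTagsForDeliveryStreamInput {
            delivery_stream_name: "my-stream".to_owned(),
            exclusive_start_tag_key: start_key.clone(),
            ..Default::default()
        };
        let output = client
            .list_tags_for_delivery_stream(input)
            .sync()
            .expect("list_tags_for_delivery_stream failed");
        for tag in &output.tags {
            println!("{} = {:?}", tag.key, tag.value);
        }
        // Resume from the last key returned, per the HasMoreTags contract.
        start_key = output.tags.last().map(|t| t.key.clone());
        if !output.has_more_tags {
            break;
        }
    }
}
```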

#[serde(rename = "HasMoreTags")] @@ -846,7 +845,7 @@ pub struct PutRecordBatchInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordBatchOutput { ///

Indicates whether server-side encryption (SSE) was enabled during this operation.

#[serde(rename = "Encrypted")] @@ -862,7 +861,7 @@ pub struct PutRecordBatchOutput { ///

Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordBatchResponseEntry { ///

The error code for an individual record result.

#[serde(rename = "ErrorCode")] @@ -889,7 +888,7 @@ pub struct PutRecordInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordOutput { ///

Indicates whether server-side encryption (SSE) was enabled during this operation.

#[serde(rename = "Encrypted")] @@ -958,7 +957,7 @@ pub struct RedshiftDestinationConfiguration { ///

Describes a destination in Amazon Redshift.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RedshiftDestinationDescription { ///

The Amazon CloudWatch logging options for your delivery stream.

#[serde(rename = "CloudWatchLoggingOptions")] @@ -1092,7 +1091,7 @@ pub struct S3DestinationConfiguration { ///

Describes a destination in Amazon S3.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct S3DestinationDescription { ///

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

#[serde(rename = "BucketARN")] @@ -1204,7 +1203,7 @@ pub struct Serializer { ///

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SourceDescription { ///

The KinesisStreamSourceDescription value for the source Kinesis data stream.

#[serde(rename = "KinesisStreamSourceDescription")] @@ -1251,7 +1250,7 @@ pub struct SplunkDestinationConfiguration { ///

Describes a destination in Splunk.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SplunkDestinationDescription { ///

The Amazon CloudWatch logging options for your delivery stream.

#[serde(rename = "CloudWatchLoggingOptions")] @@ -1349,7 +1348,7 @@ pub struct StartDeliveryStreamEncryptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartDeliveryStreamEncryptionOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1360,7 +1359,7 @@ pub struct StopDeliveryStreamEncryptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopDeliveryStreamEncryptionOutput {} ///

Metadata that you can assign to a delivery stream, consisting of a key-value pair.

@@ -1386,7 +1385,7 @@ pub struct TagDeliveryStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagDeliveryStreamOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1400,7 +1399,7 @@ pub struct UntagDeliveryStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagDeliveryStreamOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1433,7 +1432,7 @@ pub struct UpdateDestinationInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDestinationOutput {} /// Errors returned by CreateDeliveryStream @@ -2087,10 +2086,7 @@ impl KinesisFirehoseClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> KinesisFirehoseClient { - KinesisFirehoseClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2104,10 +2100,14 @@ impl KinesisFirehoseClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - KinesisFirehoseClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> KinesisFirehoseClient { + KinesisFirehoseClient { client, region } } } diff --git a/rusoto/services/fms/Cargo.toml b/rusoto/services/fms/Cargo.toml index 5c3c114527a..6717ad16a3f 100644 --- a/rusoto/services/fms/Cargo.toml +++ b/rusoto/services/fms/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_fms" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/fms/README.md b/rusoto/services/fms/README.md index 156b1c1472d..5dbcc2964cc 100644 --- a/rusoto/services/fms/README.md +++ b/rusoto/services/fms/README.md @@ -23,9 +23,16 @@ To use `rusoto_fms` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_fms = "0.40.0" +rusoto_fms = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
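The client constructor refactor shown above is repeated for every service client in this diff: `new` and `new_with` now both delegate to a new third constructor, `new_with_client`, which accepts a pre-built `Client`. A minimal sketch of the new injection point, assuming a consumer crate depending on `rusoto_core` and `rusoto_firehose` 0.41.0 (the `main` wrapper is illustrative):

```rust
use rusoto_core::{Client, Region};
use rusoto_firehose::KinesisFirehoseClient;

fn main() {
    // `new_with_client` exposes the underlying `Client` as a constructor
    // argument, so one shared client can back several service clients.
    let shared = Client::shared();
    let firehose = KinesisFirehoseClient::new_with_client(shared, Region::UsWest2);
    let _ = firehose; // ready for PutRecord / PutRecordBatch calls
}
```

Likewise, the `serialize_structs` feature added to the README above gates the recurring `cfg_attr` change: output structs derive `Serialize` only under `test` or when the feature is enabled. A hedged usage sketch, assuming the consumer's `Cargo.toml` lists `rusoto_fms = { version = "0.41.0", features = ["serialize_structs"] }` plus `serde_json`:

```rust
use rusoto_fms::GetAdminAccountResponse;

fn main() {
    // With `serialize_structs` enabled, response structs derive `Serialize`
    // outside of test builds, so they can be logged or persisted as JSON.
    let response = GetAdminAccountResponse::default();
    println!("{}", serde_json::to_string(&response).unwrap());
}
```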
diff --git a/rusoto/services/fms/src/custom/mod.rs b/rusoto/services/fms/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/fms/src/custom/mod.rs +++ b/rusoto/services/fms/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/fms/src/generated.rs b/rusoto/services/fms/src/generated.rs index 0d9fab5a168..4747a76e67d 100644 --- a/rusoto/services/fms/src/generated.rs +++ b/rusoto/services/fms/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -33,7 +32,7 @@ pub struct AssociateAdminAccountRequest { ///

Details of the resource that is not protected by the policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComplianceViolator { ///

The resource ID.

#[serde(rename = "ResourceId")] @@ -68,7 +67,7 @@ pub struct DisassociateAdminAccountRequest {} ///

Describes the compliance status for the account. An account is considered non-compliant if it includes resources that are not protected by the specified policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EvaluationResult { ///

Describes an AWS account's compliance with the AWS Firewall Manager policy.

#[serde(rename = "ComplianceStatus")] @@ -88,7 +87,7 @@ pub struct EvaluationResult { pub struct GetAdminAccountRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAdminAccountResponse { ///

The AWS account that is set as the AWS Firewall Manager administrator.

#[serde(rename = "AdminAccount")] @@ -111,7 +110,7 @@ pub struct GetComplianceDetailRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetComplianceDetailResponse { ///

Information about the resources and the policy that you specified in the GetComplianceDetail request.

#[serde(rename = "PolicyComplianceDetail")] @@ -123,7 +122,7 @@ pub struct GetComplianceDetailResponse { pub struct GetNotificationChannelRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetNotificationChannelResponse { ///

The IAM role that is used by AWS Firewall Manager to record activity to SNS.

#[serde(rename = "SnsRoleName")] @@ -143,7 +142,7 @@ pub struct GetPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPolicyResponse { ///

Information about the specified AWS Firewall Manager policy.

#[serde(rename = "Policy")] @@ -183,7 +182,7 @@ pub struct GetProtectionStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetProtectionStatusResponse { ///

The ID of the AWS Firewall administrator account for this policy.

#[serde(rename = "AdminAccountId")] @@ -219,7 +218,7 @@ pub struct ListComplianceStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListComplianceStatusResponse { ///

If you have more PolicyComplianceStatus objects than the number that you specified for MaxResults in the request, the response includes a NextToken value. To list more PolicyComplianceStatus objects, submit another ListComplianceStatus request, and specify the NextToken value from the response in the NextToken value in the next request.

#[serde(rename = "NextToken")] @@ -244,7 +243,7 @@ pub struct ListMemberAccountsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListMemberAccountsResponse { ///

An array of account IDs.

#[serde(rename = "MemberAccounts")] @@ -269,7 +268,7 @@ pub struct ListPoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPoliciesResponse { ///

If you have more PolicySummary objects than the number that you specified for MaxResults in the request, the response includes a NextToken value. To list more PolicySummary objects, submit another ListPolicies request, and specify the NextToken value from the response in the NextToken value in the next request.

#[serde(rename = "NextToken")] @@ -327,7 +326,7 @@ pub struct Policy { ///

Describes the non-compliant resources in a member account for a specific AWS Firewall Manager policy. A maximum of 100 entries are displayed. If more than 100 resources are non-compliant, EvaluationLimitExceeded is set to True.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyComplianceDetail { ///

Indicates if over 100 resources are non-compliant with the AWS Firewall Manager policy.

#[serde(rename = "EvaluationLimitExceeded")] @@ -361,7 +360,7 @@ pub struct PolicyComplianceDetail { ///

Indicates whether the account is compliant with the specified policy. An account is considered non-compliant if it includes resources that are not protected by the policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyComplianceStatus { ///

An array of EvaluationResult objects.

#[serde(rename = "EvaluationResults")] @@ -395,7 +394,7 @@ pub struct PolicyComplianceStatus { ///

Details of the AWS Firewall Manager policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicySummary { ///

The Amazon Resource Name (ARN) of the specified policy.

#[serde(rename = "PolicyArn")] @@ -441,7 +440,7 @@ pub struct PutPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutPolicyResponse { ///

The details of the AWS Firewall Manager policy that was created.

#[serde(rename = "Policy")] @@ -1244,10 +1243,7 @@ impl FmsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> FmsClient { - FmsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1261,10 +1257,14 @@ impl FmsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - FmsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> FmsClient { + FmsClient { client, region } } } diff --git a/rusoto/services/fsx/Cargo.toml b/rusoto/services/fsx/Cargo.toml index 9e7791fdafa..1e5715ed962 100644 --- a/rusoto/services/fsx/Cargo.toml +++ b/rusoto/services/fsx/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_fsx" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/fsx/README.md b/rusoto/services/fsx/README.md index c66fce3a476..99481569a84 100644 --- a/rusoto/services/fsx/README.md +++ b/rusoto/services/fsx/README.md @@ -23,9 +23,16 @@ To use `rusoto_fsx` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_fsx = "0.40.0" +rusoto_fsx = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/fsx/src/custom/mod.rs b/rusoto/services/fsx/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/fsx/src/custom/mod.rs +++ b/rusoto/services/fsx/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/fsx/src/generated.rs b/rusoto/services/fsx/src/generated.rs index a88a47a1ef2..a35a89156e6 100644 --- a/rusoto/services/fsx/src/generated.rs +++ b/rusoto/services/fsx/src/generated.rs @@ -9,24 +9,37 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; +///

The Microsoft AD attributes of the Amazon FSx for Windows File Server file system.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ActiveDirectoryBackupAttributes { + ///

The ID of the AWS Managed Microsoft Active Directory instance to which the file system is joined.

+ #[serde(rename = "ActiveDirectoryId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub active_directory_id: Option, + ///

The fully qualified domain name of the self-managed AD directory.

+ #[serde(rename = "DomainName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub domain_name: Option, +} + ///

A backup of an Amazon FSx for Windows File Server file system. You can create a new file system from a backup to protect against data loss.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Backup { ///

The ID of the backup.

#[serde(rename = "BackupId")] @@ -34,6 +47,10 @@ pub struct Backup { ///

The time when a particular backup was created.

#[serde(rename = "CreationTime")] pub creation_time: f64, + ///

The configuration of the self-managed Microsoft Active Directory (AD) to which the Windows File Server instance is joined.

+ #[serde(rename = "DirectoryInformation")] + #[serde(skip_serializing_if = "Option::is_none")] + pub directory_information: Option, ///

Details explaining any failures that occur when creating a backup.

#[serde(rename = "FailureDetails")] #[serde(skip_serializing_if = "Option::is_none")] @@ -66,7 +83,7 @@ pub struct Backup { ///

If backup creation fails, this structure contains the details of that failure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BackupFailureDetails { ///

A message describing the backup creation failure.

#[serde(rename = "Message")] @@ -92,7 +109,7 @@ pub struct CreateBackupRequest { ///

The response object for the CreateBackup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBackupResponse { ///

A description of the backup.

#[serde(rename = "Backup")] @@ -128,7 +145,7 @@ pub struct CreateFileSystemFromBackupRequest { ///

The response object for the CreateFileSystemFromBackup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFileSystemFromBackupResponse { ///

A description of the file system.

#[serde(rename = "FileSystem")] @@ -136,7 +153,7 @@ pub struct CreateFileSystemFromBackupResponse { pub file_system: Option, } -///

The configuration object for Lustre file systems used in the CreateFileSystem operation.

+///

The Lustre configuration for the file system being created. This value is required if FileSystemType is set to LUSTRE.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateFileSystemLustreConfiguration { ///

(Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z.

The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you only specify a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.

@@ -157,14 +174,14 @@ pub struct CreateFileSystemLustreConfiguration { pub weekly_maintenance_start_time: Option, } -///

The request object for the CreateFileSystem operation.

+///

The request object used to create a new Amazon FSx file system.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateFileSystemRequest { ///

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

#[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, - ///

The type of file system.

+ ///

The type of Amazon FSx file system to create.

#[serde(rename = "FileSystemType")] pub file_system_type: String, #[serde(rename = "KmsKeyId")] @@ -173,31 +190,31 @@ pub struct CreateFileSystemRequest { #[serde(rename = "LustreConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub lustre_configuration: Option, - ///

A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. This list isn't returned in later describe requests.

+ ///

A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later requests to describe the file system.

#[serde(rename = "SecurityGroupIds")] #[serde(skip_serializing_if = "Option::is_none")] pub security_group_ids: Option>, - ///

The storage capacity of the file system.

For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.

For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. Storage capacity is provisioned in increments of 3,600 GiB.

+ ///

The storage capacity of the file system being created.

For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.

For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. Storage capacity is provisioned in increments of 3,600 GiB.

#[serde(rename = "StorageCapacity")] pub storage_capacity: i64, - ///

A list of IDs for the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.

+ ///

The IDs of the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.

#[serde(rename = "SubnetIds")] pub subnet_ids: Vec, - ///

The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.

+ ///

The tags to apply to the file system being created. The key value of the Name tag appears in the console as the file system name.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, - ///

The configuration for this Microsoft Windows file system.

+ ///

The Microsoft Windows configuration for the file system being created. This value is required if FileSystemType is set to WINDOWS.

#[serde(rename = "WindowsConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub windows_configuration: Option, } -///

The response object for the CreateFileSystem operation.

+///

The response object returned after the file system is created.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFileSystemResponse { - ///

A description of the file system.

+ ///

The configuration of the file system that was created.

#[serde(rename = "FileSystem")] #[serde(skip_serializing_if = "Option::is_none")] pub file_system: Option, @@ -206,7 +223,7 @@ pub struct CreateFileSystemResponse { ///

The configuration object for the Microsoft Windows file system used in CreateFileSystem and CreateFileSystemFromBackup operations.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateFileSystemWindowsConfiguration { - ///

The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.

+ ///

The ID for an existing AWS Managed Microsoft Active Directory (AD) instance that the file system should join when it's created.

#[serde(rename = "ActiveDirectoryId")] #[serde(skip_serializing_if = "Option::is_none")] pub active_directory_id: Option, @@ -214,18 +231,22 @@ pub struct CreateFileSystemWindowsConfiguration { #[serde(rename = "AutomaticBackupRetentionDays")] #[serde(skip_serializing_if = "Option::is_none")] pub automatic_backup_retention_days: Option, - ///

A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups.

+ ///

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups.

#[serde(rename = "CopyTagsToBackups")] #[serde(skip_serializing_if = "Option::is_none")] pub copy_tags_to_backups: Option, - ///

The preferred time to take daily automatic backups, in the UTC time zone.

+ ///

The preferred time to take daily automatic backups, formatted HH:MM in the UTC time zone.

#[serde(rename = "DailyAutomaticBackupStartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub daily_automatic_backup_start_time: Option, - ///

The throughput of an Amazon FSx file system, measured in megabytes per second.

+ #[serde(rename = "SelfManagedActiveDirectoryConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub self_managed_active_directory_configuration: + Option, + ///

The throughput of an Amazon FSx file system, measured in megabytes per second, in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).

#[serde(rename = "ThroughputCapacity")] pub throughput_capacity: i64, - ///

The preferred start time to perform weekly maintenance, in the UTC time zone.

+ ///

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone.

#[serde(rename = "WeeklyMaintenanceStartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub weekly_maintenance_start_time: Option, @@ -233,7 +254,7 @@ pub struct CreateFileSystemWindowsConfiguration { ///

The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DataRepositoryConfiguration { ///

The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.

#[serde(rename = "ExportPath")] @@ -263,7 +284,7 @@ pub struct DeleteBackupRequest { ///

The response object for DeleteBackup operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBackupResponse { ///

The ID of the backup deleted.

#[serde(rename = "BackupId")] @@ -292,7 +313,7 @@ pub struct DeleteFileSystemRequest { ///

The response object for the DeleteFileSystem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFileSystemResponse { ///

The ID of the file system being deleted.

#[serde(rename = "FileSystemId")] @@ -322,7 +343,7 @@ pub struct DeleteFileSystemWindowsConfiguration { ///

The response object for the Microsoft Windows file system used in the DeleteFileSystem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFileSystemWindowsResponse { ///

The ID of the final backup for this file system.

#[serde(rename = "FinalBackupId")] @@ -357,7 +378,7 @@ pub struct DescribeBackupsRequest { ///

Response object for DescribeBackups operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBackupsResponse { ///

An array of backups.

#[serde(rename = "Backups")] @@ -388,7 +409,7 @@ pub struct DescribeFileSystemsRequest { ///

The response object for DescribeFileSystems operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFileSystemsResponse { ///

An array of file system descriptions.

#[serde(rename = "FileSystems")] @@ -402,7 +423,7 @@ pub struct DescribeFileSystemsResponse { ///

A description of a specific Amazon FSx file system.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FileSystem { ///

The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.

#[serde(rename = "CreationTime")] @@ -415,11 +436,11 @@ pub struct FileSystem { #[serde(rename = "FailureDetails")] #[serde(skip_serializing_if = "Option::is_none")] pub failure_details: Option, - ///

The eight-digit ID of the file system that was automatically assigned by Amazon FSx.

+ ///

The system-generated, unique 17-digit ID of the file system.

#[serde(rename = "FileSystemId")] #[serde(skip_serializing_if = "Option::is_none")] pub file_system_id: Option, - ///

Type of file system. Currently the only supported type is WINDOWS.

+ ///

The type of Amazon FSx file system, either LUSTRE or WINDOWS.

#[serde(rename = "FileSystemType")] #[serde(skip_serializing_if = "Option::is_none")] pub file_system_type: Option, @@ -427,30 +448,30 @@ pub struct FileSystem { #[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, - ///

The lifecycle status of the file system.

+ ///

The lifecycle status of the file system:

  • AVAILABLE indicates that the file system is reachable and available for use.

  • CREATING indicates that Amazon FSx is in the process of creating the new file system.

  • DELETING indicates that Amazon FSx is in the process of deleting the file system.

  • FAILED indicates that Amazon FSx was not able to create the file system.

  • MISCONFIGURED indicates that the file system is in a failed but recoverable state.

  • UPDATING indicates that the file system is undergoing a customer initiated update.

#[serde(rename = "Lifecycle")] #[serde(skip_serializing_if = "Option::is_none")] pub lifecycle: Option, #[serde(rename = "LustreConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub lustre_configuration: Option, - ///

The IDs of the elastic network interface from which a specific file system is accessible. The elastic network interface is automatically created in the same VPC that the Amazon FSx file system was created in. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide.

For an Amazon FSx for Windows File Server file system, you can have one network interface Id. For an Amazon FSx for Lustre file system, you can have more than one.

+ ///

The IDs of the elastic network interface from which a specific file system is accessible. The elastic network interface is automatically created in the same VPC that the Amazon FSx file system was created in. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide.

For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.

#[serde(rename = "NetworkInterfaceIds")] #[serde(skip_serializing_if = "Option::is_none")] pub network_interface_ids: Option>, - ///

The AWS account that created the file system. If the file system was created by an IAM user, the AWS account to which the IAM user belongs is the owner.

+ ///

The AWS account that created the file system. If the file system was created by an AWS Identity and Access Management (IAM) user, the AWS account to which the IAM user belongs is the owner.

#[serde(rename = "OwnerId")] #[serde(skip_serializing_if = "Option::is_none")] pub owner_id: Option, - ///

The resource ARN of the file system.

+ ///

The Amazon Resource Name (ARN) for the file system resource.

#[serde(rename = "ResourceARN")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_arn: Option, - ///

The storage capacity of the file system in gigabytes.

+ ///

The storage capacity of the file system in gigabytes (GB).

#[serde(rename = "StorageCapacity")] #[serde(skip_serializing_if = "Option::is_none")] pub storage_capacity: Option, - ///

The IDs of the subnets to contain the endpoint for the file system. One and only one is supported. The file system is launched in the Availability Zone associated with this subnet.

+ ///

The ID of the subnet to contain the endpoint for the file system. One and only one is supported. The file system is launched in the Availability Zone associated with this subnet.

#[serde(rename = "SubnetIds")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_ids: Option>, @@ -468,11 +489,11 @@ pub struct FileSystem { pub windows_configuration: Option, } -///

-/// Structure providing details of any failures that occur when creating the file system has failed.
+/// A structure providing details of any failures that occur when creating the file system has failed.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FileSystemFailureDetails { - ///

Message describing the failures that occurred during file system creation.

+ ///

A message describing any failures that occurred during file system creation.

#[serde(rename = "Message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option, @@ -509,7 +530,7 @@ pub struct ListTagsForResourceRequest { ///

The response object for ListTagsForResource operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

This is present if there are more tags than returned in the response (String). You can use the NextToken value in the later request to fetch the tags.

#[serde(rename = "NextToken")] @@ -523,7 +544,7 @@ pub struct ListTagsForResourceResponse { ///

The configuration for the Amazon FSx for Lustre file system.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LustreFileSystemConfiguration { #[serde(rename = "DataRepositoryConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] @@ -534,6 +555,74 @@ pub struct LustreFileSystemConfiguration { pub weekly_maintenance_start_time: Option, } +///

The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server instance is joined.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct SelfManagedActiveDirectoryAttributes { + ///

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

+ #[serde(rename = "DnsIps")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dns_ips: Option>, + ///

The fully qualified domain name of the self-managed AD directory.

+ #[serde(rename = "DomainName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub domain_name: Option, + ///

The name of the domain group whose members have administrative privileges for the FSx file system.

+ #[serde(rename = "FileSystemAdministratorsGroup")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_system_administrators_group: Option, + ///

The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server instance is joined.

+ #[serde(rename = "OrganizationalUnitDistinguishedName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organizational_unit_distinguished_name: Option, + ///

The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.

+ #[serde(rename = "UserName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_name: Option, +} + +///

The configuration that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct SelfManagedActiveDirectoryConfiguration { + ///

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the one in which your Amazon FSx file system is being created, or in the private IP version 4 (IPv4) address ranges, as specified in RFC 1918:

  • 10.0.0.0 - 10.255.255.255 (10/8 prefix)

  • 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)

  • 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)

+ #[serde(rename = "DnsIps")] + pub dns_ips: Vec, + ///

The fully qualified domain name of the self-managed AD directory, such as corp.example.com.

+ #[serde(rename = "DomainName")] + pub domain_name: String, + ///

(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.

+ #[serde(rename = "FileSystemAdministratorsGroup")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_system_administrators_group: Option, + ///

(Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. Amazon FSx only accepts OU as the direct parent of the file system. An example is OU=FSx,DC=yourdomain,DC=corp,DC=com. To learn more, see RFC 2253. If none is provided, the FSx file system is created in the default location of your self-managed AD directory.

Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.

+ #[serde(rename = "OrganizationalUnitDistinguishedName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub organizational_unit_distinguished_name: Option, + ///

The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.

+ #[serde(rename = "Password")] + pub password: String, + ///

The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName, or in the default location of your AD domain.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +///

The configuration that Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft Active Directory (AD) directory.
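Wiring the new type into a file system creation request looks roughly like the sketch below, assuming `rusoto_fsx` 0.41.0; the domain, DNS IPs, and credentials are placeholders, and a real caller would fetch the password from a secret store:

```rust
use rusoto_fsx::{
    CreateFileSystemWindowsConfiguration, SelfManagedActiveDirectoryConfiguration,
};

fn windows_config_with_self_managed_ad() -> CreateFileSystemWindowsConfiguration {
    CreateFileSystemWindowsConfiguration {
        throughput_capacity: 8, // 2^3 MB/s, the smallest supported value
        self_managed_active_directory_configuration: Some(
            SelfManagedActiveDirectoryConfiguration {
                domain_name: "corp.example.com".to_owned(),
                // Must sit in the VPC CIDR range or an RFC 1918 private range.
                dns_ips: vec!["10.0.0.10".to_owned(), "10.0.0.11".to_owned()],
                user_name: "FsxServiceAccount".to_owned(),
                password: "placeholder-password".to_owned(),
                ..Default::default()
            },
        ),
        ..Default::default()
    }
}
```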

+/// The configuration that Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft Active Directory (AD) directory.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct SelfManagedActiveDirectoryConfigurationUpdates {
+    /// A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.
+ #[serde(rename = "DnsIps")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dns_ips: Option>, + ///

The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.

+ #[serde(rename = "Password")] + #[serde(skip_serializing_if = "Option::is_none")] + pub password: Option, + ///

The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

+ #[serde(rename = "UserName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_name: Option, +} + ///

Specifies a key-value pair for a resource tag.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { @@ -560,7 +649,7 @@ pub struct TagResourceRequest { ///

The response object for the TagResource operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

The request object for UntagResource action.

@@ -576,7 +665,7 @@ pub struct UntagResourceRequest { ///

The response object for UntagResource action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} ///

The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

@@ -600,7 +689,7 @@ pub struct UpdateFileSystemRequest { #[serde(rename = "LustreConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub lustre_configuration: Option, - ///

The configuration for this Microsoft Windows file system. The only supported options are for backup and maintenance.

+ ///

The configuration update for this Microsoft Windows file system. The only supported options are for backup and maintenance and for self-managed Active Directory configuration.

#[serde(rename = "WindowsConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub windows_configuration: Option, @@ -608,15 +697,15 @@ pub struct UpdateFileSystemRequest { ///

The response object for the UpdateFileSystem operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFileSystemResponse { - ///

A description of the file system.

+ ///

A description of the file system that was updated.

#[serde(rename = "FileSystem")] #[serde(skip_serializing_if = "Option::is_none")] pub file_system: Option, } -///

The configuration object for the Microsoft Windows file system used in the UpdateFileSystem operation.

+///

Updates the Microsoft Windows configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx overwrites existing properties with non-null values provided in the request. If you don't specify a non-null value for a property, that property is not updated.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateFileSystemWindowsConfiguration { ///

The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 35 days.

@@ -627,6 +716,11 @@ pub struct UpdateFileSystemWindowsConfiguration { #[serde(rename = "DailyAutomaticBackupStartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub daily_automatic_backup_start_time: Option, + ///

The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory.

+ #[serde(rename = "SelfManagedActiveDirectoryConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub self_managed_active_directory_configuration: + Option, ///

The preferred time to perform weekly maintenance, in the UTC time zone.

#[serde(rename = "WeeklyMaintenanceStartTime")] #[serde(skip_serializing_if = "Option::is_none")] @@ -635,7 +729,7 @@ pub struct UpdateFileSystemWindowsConfiguration { ///

The configuration for this Microsoft Windows file system.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WindowsFileSystemConfiguration { ///

The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.

#[serde(rename = "ActiveDirectoryId")] @@ -657,6 +751,9 @@ pub struct WindowsFileSystemConfiguration { #[serde(rename = "MaintenanceOperationsInProgress")] #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_operations_in_progress: Option>, + #[serde(rename = "SelfManagedActiveDirectoryConfiguration")] + #[serde(skip_serializing_if = "Option::is_none")] + pub self_managed_active_directory_configuration: Option, ///

The throughput of an Amazon FSx file system, measured in megabytes per second.

#[serde(rename = "ThroughputCapacity")] #[serde(skip_serializing_if = "Option::is_none")] @@ -682,6 +779,8 @@ pub enum CreateBackupError { InternalServerError(String), ///

An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting AWS Support.

ServiceLimitExceeded(String), + ///

An error occurred.

+    UnsupportedOperation(String),
}

impl CreateBackupError {
@@ -708,6 +807,9 @@ impl CreateBackupError {
            "ServiceLimitExceeded" => {
                return RusotoError::Service(CreateBackupError::ServiceLimitExceeded(err.msg))
            }
+            "UnsupportedOperation" => {
+                return RusotoError::Service(CreateBackupError::UnsupportedOperation(err.msg))
+            }
            "ValidationException" => return RusotoError::Validation(err.msg),
            _ => {}
        }
@@ -729,6 +831,7 @@ impl Error for CreateBackupError {
            CreateBackupError::IncompatibleParameterError(ref cause) => cause,
            CreateBackupError::InternalServerError(ref cause) => cause,
            CreateBackupError::ServiceLimitExceeded(ref cause) => cause,
+            CreateBackupError::UnsupportedOperation(ref cause) => cause,
        }
    }
}
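Downstream code can match on the new variant like any other typed service error. A minimal sketch, assuming `rusoto_fsx` 0.41.0 and a hypothetical file system ID:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_fsx::{CreateBackupError, CreateBackupRequest, Fsx, FsxClient};

fn main() {
    let client = FsxClient::new(Region::UsEast1);
    let request = CreateBackupRequest {
        file_system_id: "fs-0123456789abcdef0".to_owned(), // hypothetical
        ..Default::default()
    };
    match client.create_backup(request).sync() {
        // The new variant surfaces as a typed service error.
        Err(RusotoError::Service(CreateBackupError::UnsupportedOperation(msg))) => {
            eprintln!("backups are not supported here: {}", msg)
        }
        Err(e) => eprintln!("CreateBackup failed: {}", e),
        Ok(response) => println!("created backup: {:?}", response.backup),
    }
}
```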

@@ -1342,6 +1445,8 @@ pub enum UpdateFileSystemError {
    InternalServerError(String),
    /// File system configuration is required for this operation.
    MissingFileSystemConfiguration(String),
+    /// An error occurred.
+ UnsupportedOperation(String), } impl UpdateFileSystemError { @@ -1369,6 +1474,11 @@ impl UpdateFileSystemError { UpdateFileSystemError::MissingFileSystemConfiguration(err.msg), ) } + "UnsupportedOperation" => { + return RusotoError::Service(UpdateFileSystemError::UnsupportedOperation( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -1389,6 +1499,7 @@ impl Error for UpdateFileSystemError { UpdateFileSystemError::IncompatibleParameterError(ref cause) => cause, UpdateFileSystemError::InternalServerError(ref cause) => cause, UpdateFileSystemError::MissingFileSystemConfiguration(ref cause) => cause, + UpdateFileSystemError::UnsupportedOperation(ref cause) => cause, } } } @@ -1406,7 +1517,7 @@ pub trait Fsx { input: CreateFileSystemRequest, ) -> RusotoFuture; - ///

Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.

If a file system with the specified client request token exists and the parameters match, this call returns the description of the existing file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

  • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system.

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

+ ///

Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.

If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

  • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system.

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

fn create_file_system_from_backup( &self, input: CreateFileSystemFromBackupRequest, @@ -1472,10 +1583,7 @@ impl FsxClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> FsxClient { - FsxClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1489,10 +1597,14 @@ impl FsxClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - FsxClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> FsxClient { + FsxClient { client, region } } } @@ -1558,7 +1670,7 @@ impl Fsx for FsxClient { }) } - ///

-    /// Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.
-    ///
-    /// If a file system with the specified client request token exists and the parameters match, this call returns the description of the existing file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:
-    ///   • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.
-    ///   • Returns the description of the file system.
-    ///
-    /// Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.
-    ///
-    /// By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.
-    ///
-    /// The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.
+    /// Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.
+    ///
+    /// If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:
+    ///   • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.
+    ///   • Returns the description of the file system.
+    ///
+    /// Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.
+    ///
+    /// By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.
+    ///
+    /// The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.
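Because the call is idempotent on the client request token, a failed first attempt can be retried verbatim. A sketch of that retry pattern, assuming the request struct follows the usual rusoto field naming (`backup_id`, `client_request_token`, `subnet_ids`); the IDs here are placeholders:

```rust
use rusoto_core::Region;
use rusoto_fsx::{CreateFileSystemFromBackupRequest, Fsx, FsxClient};

fn main() {
    let client = FsxClient::new(Region::UsEast1);
    let request = CreateFileSystemFromBackupRequest {
        backup_id: "backup-0123456789abcdef0".to_string(),
        // Reusing this token on a retry guarantees at most one file system
        // is created, even if the first response was lost in transit.
        client_request_token: Some("create-from-backup-1".to_string()),
        subnet_ids: vec!["subnet-0123456789abcdef0".to_string()],
        ..Default::default()
    };
    if client.create_file_system_from_backup(request.clone()).sync().is_err() {
        // Safe to resend the identical request: matching parameters plus the
        // same token return the existing file system's description.
        let _ = client.create_file_system_from_backup(request).sync();
    }
}
```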

    fn create_file_system_from_backup(
        &self,
        input: CreateFileSystemFromBackupRequest,
diff --git a/rusoto/services/gamelift/Cargo.toml b/rusoto/services/gamelift/Cargo.toml
index 5cfea36056f..8a3ad877dfc 100644
--- a/rusoto/services/gamelift/Cargo.toml
+++ b/rusoto/services/gamelift/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_gamelift"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/gamelift/README.md b/rusoto/services/gamelift/README.md
index 854a277291f..eaa7cadcc15 100644
--- a/rusoto/services/gamelift/README.md
+++ b/rusoto/services/gamelift/README.md
@@ -23,9 +23,16 @@ To use `rusoto_gamelift` in your application, add it as a dependency in your `Cargo.toml`:
 
 ```toml
 [dependencies]
-rusoto_gamelift = "0.40.0"
+rusoto_gamelift = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/gamelift/src/custom/mod.rs b/rusoto/services/gamelift/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/gamelift/src/custom/mod.rs
+++ b/rusoto/services/gamelift/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/gamelift/src/generated.rs b/rusoto/services/gamelift/src/generated.rs
index 4cc92d010e2..2da524b24b2 100644
--- a/rusoto/services/gamelift/src/generated.rs
+++ b/rusoto/services/gamelift/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -39,12 +38,12 @@ pub struct AcceptMatchInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AcceptMatchOutput {}
 
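The new `serialize_structs` feature widens the `cfg_attr` gate so output structs derive `Serialize` outside of test builds as well. With `rusoto_gamelift = { version = "0.41.0", features = ["serialize_structs"] }` and `serde_json` in your `Cargo.toml`, a response can be dumped to JSON; a minimal sketch:

```rust
use rusoto_gamelift::ListBuildsOutput;

fn main() {
    // Compiles only with the `serialize_structs` feature enabled; without
    // it, output structs derive `Serialize` in test builds only.
    let output = ListBuildsOutput::default();
    let json = serde_json::to_string_pretty(&output).expect("serializable");
    println!("{}", json);
}
```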

 /// Properties describing a fleet alias.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Alias {
     /// Unique identifier for an alias; alias ARNs are unique across all regions.
     #[serde(rename = "AliasArn")]
@@ -76,7 +75,7 @@ pub struct Alias {
     pub routing_strategy: Option<RoutingStrategy>,
 }
 
-/// Values for use in Player attribute key:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array or data map. Each AttributeValue object can use only one of the available properties.
+/// Values for use in Player attribute key:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array, or data map. Each AttributeValue object can use only one of the available properties.

 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AttributeValue {
     /// For number values, expressed as double.
@@ -99,7 +98,7 @@ pub struct AttributeValue {
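Exactly one of the `AttributeValue` properties may be set per value. A sketch of building the number and string variants; the lower-cased field names `n` and `s` mirror the API's `N`/`S` members and are assumed here rather than taken from the excerpt:

```rust
use rusoto_gamelift::AttributeValue;

fn main() {
    // Numeric attribute, e.g. a skill rating (the "N" member).
    let skill = AttributeValue { n: Some(42.0), ..Default::default() };
    // String attribute, e.g. a preferred role (the "S" member).
    let role = AttributeValue { s: Some("healer".to_string()), ..Default::default() };
    let _ = (skill, role);
}
```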

 /// Temporary access credentials used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AwsCredentials {
     /// Temporary key allowing access to the Amazon GameLift S3 account.
     #[serde(rename = "AccessKeyId")]
@@ -117,7 +116,7 @@ pub struct AwsCredentials {

 /// Properties describing a custom game build.
 ///
 /// Related operations
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Build {
     /// Unique identifier for a build.
     #[serde(rename = "BuildId")]
@@ -149,6 +148,12 @@ pub struct Build {
     pub version: Option<String>,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct CertificateConfiguration {
+    #[serde(rename = "CertificateType")]
+    pub certificate_type: String,
+}
+

 /// Represents the input for a request action.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateAliasInput {
@@ -166,7 +171,7 @@
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateAliasOutput {
     /// Object that describes the newly created alias record.
     #[serde(rename = "Alias")]
@@ -197,7 +202,7 @@
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateBuildOutput {
     /// The newly created build record, including a unique build ID and status.
     #[serde(rename = "Build")]
@@ -220,6 +225,9 @@ pub struct CreateFleetInput {
     #[serde(rename = "BuildId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub build_id: Option<String>,
+    #[serde(rename = "CertificateConfiguration")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub certificate_configuration: Option<CertificateConfiguration>,
     /// Human-readable description of a fleet.
     #[serde(rename = "Description")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -286,7 +294,7 @@
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateFleetOutput {
     /// Properties for the newly created fleet.
     #[serde(rename = "FleetAttributes")]
@@ -336,7 +344,7 @@ pub struct CreateGameSessionInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateGameSessionOutput {
     /// Object that describes the newly created game session record.
     #[serde(rename = "GameSession")]
@@ -366,7 +374,7 @@ pub struct CreateGameSessionQueueInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateGameSessionQueueOutput {
     /// Object that describes the newly created game session queue.
     #[serde(rename = "GameSessionQueue")]
@@ -377,7 +385,7 @@ pub struct CreateGameSessionQueueOutput {

 /// Represents the input for a request action.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateMatchmakingConfigurationInput {
-    /// Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
+    /// Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
     #[serde(rename = "AcceptanceRequired")]
     pub acceptance_required: bool,
     /// Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.
@@ -388,7 +396,11 @@ pub struct CreateMatchmakingConfigurationInput {
     #[serde(rename = "AdditionalPlayerCount")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub additional_player_count: Option<i64>,
-    /// Information to attached to all events related to the matchmaking configuration.
+    /// Method used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
+    #[serde(rename = "BackfillMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub backfill_mode: Option<String>,

+    /// Information to be added to all events related to this matchmaking configuration.
     #[serde(rename = "CustomEventData")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub custom_event_data: Option<String>,
@@ -404,7 +416,7 @@ pub struct CreateMatchmakingConfigurationInput {
     #[serde(rename = "GameSessionData")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub game_session_data: Option<String>,
-    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.
+    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.
     #[serde(rename = "GameSessionQueueArns")]
     pub game_session_queue_arns: Vec<String>,
     /// Unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.
@@ -414,7 +426,7 @@
     #[serde(rename = "NotificationTarget")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub notification_target: Option<String>,
-    /// Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.
+    /// Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.
     #[serde(rename = "RequestTimeoutSeconds")]
     pub request_timeout_seconds: i64,
     /// Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.
@@ -424,7 +436,7 @@
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateMatchmakingConfigurationOutput {
     /// Object that describes the newly created matchmaking configuration.
     #[serde(rename = "Configuration")]
@@ -438,14 +450,14 @@ pub struct CreateMatchmakingRuleSetInput {
     /// Unique identifier for a matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. (Note: The rule set name is different from the optional "name" field in the rule set body.)
     #[serde(rename = "Name")]
     pub name: String,
-    /// Collection of matchmaking rules, formatted as a JSON string. Note that comments are not allowed in JSON, but most elements support a description field.
+    /// Collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.
     #[serde(rename = "RuleSetBody")]
     pub rule_set_body: String,
 }

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateMatchmakingRuleSetOutput {
     /// Object that describes the newly created matchmaking rule set.
     #[serde(rename = "RuleSet")]
@@ -469,7 +481,7 @@ pub struct CreatePlayerSessionInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreatePlayerSessionOutput {
     /// Object that describes the newly created player session record.
     #[serde(rename = "PlayerSession")]
@@ -494,7 +506,7 @@ pub struct CreatePlayerSessionsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreatePlayerSessionsOutput {
     /// Collection of player session objects created for the added players.
     #[serde(rename = "PlayerSessions")]
@@ -528,7 +540,7 @@ pub struct CreateScriptInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateScriptOutput {
     /// The newly created script record with a unique script ID. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controls by the Amazon GameLift service.
     #[serde(rename = "Script")]
@@ -549,7 +561,7 @@ pub struct CreateVpcPeeringAuthorizationInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateVpcPeeringAuthorizationOutput {
     /// Details on the requested VPC peering authorization, including expiration.
     #[serde(rename = "VpcPeeringAuthorization")]
@@ -572,7 +584,7 @@ pub struct CreateVpcPeeringConnectionInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateVpcPeeringConnectionOutput {}
 

 /// Represents the input for a request action.
@@ -608,7 +620,7 @@ pub struct DeleteGameSessionQueueInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteGameSessionQueueOutput {}
 
 /// Represents the input for a request action.
@@ -620,7 +632,7 @@ pub struct DeleteMatchmakingConfigurationInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteMatchmakingConfigurationOutput {}
 
 /// Represents the input for a request action.
@@ -633,7 +645,7 @@ pub struct DeleteMatchmakingRuleSetInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteMatchmakingRuleSetOutput {}
 
 /// Represents the input for a request action.
@@ -666,7 +678,7 @@ pub struct DeleteVpcPeeringAuthorizationInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteVpcPeeringAuthorizationOutput {}
 
 /// Represents the input for a request action.
@@ -681,7 +693,7 @@ pub struct DeleteVpcPeeringConnectionInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteVpcPeeringConnectionOutput {}
 
 /// Represents the input for a request action.
@@ -694,7 +706,7 @@ pub struct DescribeAliasInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeAliasOutput {
     /// Object that contains the requested alias.
     #[serde(rename = "Alias")]
@@ -712,7 +724,7 @@ pub struct DescribeBuildInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeBuildOutput {
     /// Set of properties describing the requested build.
     #[serde(rename = "Build")]
@@ -731,7 +743,7 @@ pub struct DescribeEC2InstanceLimitsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeEC2InstanceLimitsOutput {
     /// Object that contains the maximum number of instances for the specified instance type.
     #[serde(rename = "EC2InstanceLimits")]
@@ -758,7 +770,7 @@ pub struct DescribeFleetAttributesInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeFleetAttributesOutput {
     /// Collection of objects containing attribute metadata for each requested fleet ID.
     #[serde(rename = "FleetAttributes")]
@@ -789,7 +801,7 @@ pub struct DescribeFleetCapacityInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeFleetCapacityOutput {
     /// Collection of objects containing capacity information for each requested fleet ID. Leave this parameter empty to retrieve capacity information for all fleets.
     #[serde(rename = "FleetCapacity")]
@@ -827,7 +839,7 @@ pub struct DescribeFleetEventsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeFleetEventsOutput {
     /// Collection of objects containing event log entries for the specified fleet.
     #[serde(rename = "Events")]
@@ -849,7 +861,7 @@ pub struct DescribeFleetPortSettingsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeFleetPortSettingsOutput {
     /// Object that contains port settings for the requested fleet ID.
     #[serde(rename = "InboundPermissions")]
@@ -876,7 +888,7 @@ pub struct DescribeFleetUtilizationInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeFleetUtilizationOutput {
     /// Collection of objects containing utilization information for each requested fleet ID.
     #[serde(rename = "FleetUtilization")]
@@ -919,7 +931,7 @@ pub struct DescribeGameSessionDetailsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGameSessionDetailsOutput {
     /// Collection of objects containing game session properties and the protection policy currently in force for each session matching the request.
     #[serde(rename = "GameSessionDetails")]
@@ -941,7 +953,7 @@ pub struct DescribeGameSessionPlacementInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGameSessionPlacementOutput {
     /// Object that describes the requested game session placement.
     #[serde(rename = "GameSessionPlacement")]
@@ -968,7 +980,7 @@ pub struct DescribeGameSessionQueuesInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGameSessionQueuesOutput {
     /// Collection of objects that describes the requested game session queues.
     #[serde(rename = "GameSessionQueues")]
@@ -1011,7 +1023,7 @@ pub struct DescribeGameSessionsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeGameSessionsOutput {
     /// Collection of objects containing game session properties for each session matching the request.
     #[serde(rename = "GameSessions")]
@@ -1045,7 +1057,7 @@ pub struct DescribeInstancesInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeInstancesOutput {
     /// Collection of objects containing properties for each instance returned.
     #[serde(rename = "Instances")]
@@ -1080,7 +1092,7 @@ pub struct DescribeMatchmakingConfigurationsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeMatchmakingConfigurationsOutput {
     /// Collection of requested matchmaking configuration objects.
     #[serde(rename = "Configurations")]
@@ -1102,7 +1114,7 @@ pub struct DescribeMatchmakingInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeMatchmakingOutput {
     /// Collection of existing matchmaking ticket objects matching the request.
     #[serde(rename = "TicketList")]
@@ -1129,7 +1141,7 @@ pub struct DescribeMatchmakingRuleSetsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeMatchmakingRuleSetsOutput {
     /// Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.
     #[serde(rename = "NextToken")]
@@ -1171,7 +1183,7 @@ pub struct DescribePlayerSessionsInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribePlayerSessionsOutput {
     /// Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.
     #[serde(rename = "NextToken")]
@@ -1193,7 +1205,7 @@ pub struct DescribeRuntimeConfigurationInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeRuntimeConfigurationOutput {
     /// Instructions describing how server processes should be launched and maintained on each instance in the fleet.
     #[serde(rename = "RuntimeConfiguration")]
@@ -1223,7 +1235,7 @@ pub struct DescribeScalingPoliciesInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeScalingPoliciesOutput {
     /// Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.
     #[serde(rename = "NextToken")]
@@ -1243,7 +1255,7 @@ pub struct DescribeScriptInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeScriptOutput {
     /// Set of properties describing the requested script.
     #[serde(rename = "Script")]
@@ -1255,7 +1267,7 @@
 pub struct DescribeVpcPeeringAuthorizationsInput {}
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeVpcPeeringAuthorizationsOutput {
     /// Collection of objects that describe all valid VPC peering operations for the current AWS account.
     #[serde(rename = "VpcPeeringAuthorizations")]
@@ -1274,7 +1286,7 @@ pub struct DescribeVpcPeeringConnectionsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeVpcPeeringConnectionsOutput {
     /// Collection of VPC peering connection records that match the request.
     #[serde(rename = "VpcPeeringConnections")]
@@ -1297,7 +1309,7 @@ pub struct DesiredPlayerSession {

 /// Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct EC2InstanceCounts {
     /// Actual number of active instances in the fleet.
     #[serde(rename = "ACTIVE")]
@@ -1331,7 +1343,7 @@ pub struct EC2InstanceCounts {
 /// Maximum number of instances allowed based on the Amazon Elastic Compute Cloud (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct EC2InstanceLimit {
     /// Number of instances of the specified type that are currently in use by this AWS account.
     #[serde(rename = "CurrentInstances")]
@@ -1349,7 +1361,7 @@ pub struct EC2InstanceLimit {

 /// Log entry describing an event that involves Amazon GameLift resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Event {
     /// Type of event being logged. The following events are currently in use:
     ///
     /// Fleet creation events:
     ///   • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.
     ///   • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.
     ///   • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.
     ///   • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.
     ///   • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.
     ///   • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.
     ///   • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.
     ///   • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.
     ///   • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.
     ///   • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.
     ///   • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.
     ///   • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting "process ready" to the Amazon GameLift service.
     ///   • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.
     ///
     /// VPC peering events:
     ///   • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.
     ///   • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html
     ///   • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.
     ///
     /// Spot instance events:
     ///   • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.
     ///
     /// Other fleet events:
     ///   • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.
     ///   • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.
     ///   • FLEET_DELETED -- A request to delete a fleet was initiated.
     ///   • GENERIC_EVENT -- An unspecified event has occurred.
     #[serde(rename = "EventCode")]
@@ -1379,12 +1391,15 @@ pub struct Event {

 /// General properties describing a fleet.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct FleetAttributes {
     /// Unique identifier for a build.
     #[serde(rename = "BuildId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub build_id: Option<String>,
+    #[serde(rename = "CertificateConfiguration")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub certificate_configuration: Option<CertificateConfiguration>,
     /// Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").
     #[serde(rename = "CreationTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1465,7 +1480,7 @@ pub struct FleetAttributes {

 /// Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct FleetCapacity {
     /// Unique identifier for a fleet.
     #[serde(rename = "FleetId")]
@@ -1483,7 +1498,7 @@ pub struct FleetCapacity {
 /// Current status of fleet utilization, including the number of game and player sessions being hosted.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct FleetUtilization {
     /// Number of active game sessions currently being hosted on all instances in the fleet.
     #[serde(rename = "ActiveGameSessionCount")]
@@ -1520,7 +1535,7 @@ pub struct GameProperty {

 /// Properties describing a game session.
 ///
 /// A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.
 ///
 /// Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GameSession {
     /// Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").
     #[serde(rename = "CreationTime")]
@@ -1534,6 +1549,9 @@ pub struct GameSession {
     #[serde(rename = "CurrentPlayerSessionCount")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub current_player_session_count: Option<i64>,
+    #[serde(rename = "DnsName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dns_name: Option<String>,

     /// Unique identifier for a fleet that the game session is running on.
     #[serde(rename = "FleetId")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1590,8 +1608,11 @@ pub struct GameSession {
 /// Connection information for the new game session that is created with matchmaking. (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GameSessionConnectionInfo {
+    #[serde(rename = "DnsName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dns_name: Option<String>,
     /// Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.
     #[serde(rename = "GameSessionArn")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1612,7 +1633,7 @@ pub struct GameSessionConnectionInfo {
 /// A game session's properties plus the protection policy currently in force.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GameSessionDetail {
     /// Object that describes a game session.
     #[serde(rename = "GameSession")]
@@ -1626,8 +1647,11 @@ pub struct GameSessionDetail {
 /// Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.
 ///
 /// Game session placement-related operations include:
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GameSessionPlacement {
+    #[serde(rename = "DnsName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dns_name: Option<String>,
     /// Time stamp indicating when this request was completed, canceled, or timed out.
     #[serde(rename = "EndTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1700,13 +1724,13 @@ pub struct GameSessionPlacement {

 /// Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:
 ///   • The destinations where a new game session can potentially be hosted. Amazon GameLift tries these destinations in an order based on either the queue's default order or player latency information, if provided in a placement request. With latency information, Amazon GameLift can place game sessions where the majority of players are reporting the lowest possible latency.
 ///   • The length of time that placement requests can wait in the queue before timing out.
 ///   • A set of optional latency policies that protect individual players from high latencies, preventing game sessions from being placed where any individual player is reporting latency higher than a policy's maximum.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GameSessionQueue {
     /// List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.
     #[serde(rename = "Destinations")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub destinations: Option<Vec<GameSessionQueueDestination>>,
-    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.
+    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>.
     #[serde(rename = "GameSessionQueueArn")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub game_session_queue_arn: Option<String>,
@@ -1743,7 +1767,7 @@ pub struct GetGameSessionLogUrlInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetGameSessionLogUrlOutput {
     /// Location of the requested game session logs, available for download. This URL is valid for 15 minutes, after which S3 will reject any download request using this URL. You can request a new URL any time within the 14-day period that the logs are retained.
     #[serde(rename = "PreSignedUrl")]
@@ -1764,7 +1788,7 @@ pub struct GetInstanceAccessInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetInstanceAccessOutput {
     /// Object that contains connection information for a fleet instance, including IP address and access credentials.
     #[serde(rename = "InstanceAccess")]
@@ -1774,12 +1798,15 @@ pub struct GetInstanceAccessOutput {
 /// Properties that describe an instance of a virtual computing resource that hosts one or more game servers. A fleet may contain zero or more instances.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Instance {
     /// Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").
     #[serde(rename = "CreationTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub creation_time: Option<f64>,
+    #[serde(rename = "DnsName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dns_name: Option<String>,
     /// Unique identifier for a fleet that the instance is in.
     #[serde(rename = "FleetId")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1808,7 +1835,7 @@ pub struct Instance {
 /// Information required to remotely connect to a fleet instance. Access is requested by calling GetInstanceAccess.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InstanceAccess {
     /// Credentials required to access the instance.
     #[serde(rename = "Credentials")]
@@ -1834,7 +1861,7 @@ pub struct InstanceAccess {
 /// Set of credentials required to remotely access a fleet instance. Access credentials are requested by calling GetInstanceAccess and returned in an InstanceAccess object.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InstanceCredentials {
     /// Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it is a private key (which must be saved as a .pem file) for use with SSH.
     #[serde(rename = "Secret")]
@@ -1886,7 +1913,7 @@ pub struct ListAliasesInput {

 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListAliasesOutput {
     /// Collection of alias records that match the list request.
     #[serde(rename = "Aliases")]
@@ -1917,7 +1944,7 @@ pub struct ListBuildsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListBuildsOutput {
     /// Collection of build records that match the request.
     #[serde(rename = "Builds")]
@@ -1952,7 +1979,7 @@ pub struct ListFleetsInput {
 /// Represents the returned data in response to a request action.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListFleetsOutput {
     /// Set of fleet IDs matching the list request. You can retrieve additional information about all returned fleets by passing this result set to a call to DescribeFleetAttributes, DescribeFleetCapacity, or DescribeFleetUtilization.
     #[serde(rename = "FleetIds")]
@@ -1977,7 +2004,7 @@ pub struct ListScriptsInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListScriptsOutput {
     /// Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.
     #[serde(rename = "NextToken")]
@@ -1991,7 +2018,7 @@ pub struct ListScriptsOutput {

 /// Represents a new player session that is created as a result of a successful FlexMatch match. A successful match automatically creates new player sessions for every player ID in the original matchmaking request.
 ///
 /// When players connect to the match's game session, they must include both player ID and player session ID in order to claim their assigned player slot.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MatchedPlayerSession {
     /// Unique identifier for a player
     #[serde(rename = "PlayerId")]
@@ -2005,9 +2032,9 @@ pub struct MatchedPlayerSession {
 /// Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MatchmakingConfiguration {
-    /// Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
+    /// Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
     #[serde(rename = "AcceptanceRequired")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub acceptance_required: Option<bool>,
@@ -2019,11 +2046,15 @@ pub struct MatchmakingConfiguration {
     #[serde(rename = "AdditionalPlayerCount")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub additional_player_count: Option<i64>,
+    /// Method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
+    #[serde(rename = "BackfillMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub backfill_mode: Option<String>,
     /// Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").
     #[serde(rename = "CreationTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub creation_time: Option<f64>,
-    /// Information to attached to all events related to the matchmaking configuration.
+    /// Information to attach to all events related to the matchmaking configuration.
     #[serde(rename = "CustomEventData")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub custom_event_data: Option<String>,
@@ -2039,7 +2070,7 @@ pub struct MatchmakingConfiguration {
     #[serde(rename = "GameSessionData")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub game_session_data: Option<String>,
-    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.
+    /// Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.
     #[serde(rename = "GameSessionQueueArns")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub game_session_queue_arns: Option<Vec<String>>,
@@ -2051,7 +2082,7 @@ pub struct MatchmakingConfiguration {
     #[serde(rename = "NotificationTarget")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub notification_target: Option<String>,
-    /// Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.
+    /// Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.
     #[serde(rename = "RequestTimeoutSeconds")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub request_timeout_seconds: Option<i64>,
@@ -2061,15 +2092,15 @@ pub struct MatchmakingConfiguration {
     pub rule_set_name: Option<String>,
 }
 

-/// Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.
-///
-/// A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.
-///   • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.
-///   • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.
-///   • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.
-///   • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
+/// Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.
+///
+/// A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.
+///   • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.
+///   • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.
+///   • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.
+///   • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MatchmakingRuleSet {
     /// Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").
     #[serde(rename = "CreationTime")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub creation_time: Option<f64>,
-    /// Collection of matchmaking rules, formatted as a JSON string. (Note that comments are not allowed in JSON, but most elements support a description field.)
+    /// Collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.
     #[serde(rename = "RuleSetBody")]
     pub rule_set_body: String,
     /// Unique identifier for a matchmaking rule set
@@ -2080,7 +2111,7 @@ pub struct MatchmakingRuleSet {

Ticket generated to track the progress of a matchmaking request. Each ticket is uniquely identified by a ticket ID, supplied by the requester, when creating a matchmaking request with StartMatchmaking. Tickets can be retrieved by calling DescribeMatchmaking with the ticket ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MatchmakingTicket { ///

Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking configurations determine how players are grouped into a match and how a new game session is created for the match.

#[serde(rename = "ConfigurationName")] @@ -2106,7 +2137,7 @@ pub struct MatchmakingTicket { #[serde(rename = "StartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub start_time: Option, - ///

Current status of the matchmaking request.

  • QUEUED -- The matchmaking request has been received and is currently waiting to be processed.

  • SEARCHING -- The matchmaking request is currently being processed.

  • REQUIRESACCEPTANCE -- A match has been proposed and the players must accept the match (see AcceptMatch). This status is used only with requests that use a matchmaking configuration with a player acceptance requirement.

  • PLACING -- The FlexMatch engine has matched players and is in the process of placing a new game session for the match.

  • COMPLETED -- Players have been matched and a game session is ready to host the players. A ticket in this state contains the necessary connection information for players.

  • FAILED -- The matchmaking request was not completed. Tickets with players who fail to accept a proposed match are placed in FAILED status.

  • CANCELLED -- The matchmaking request was canceled with a call to StopMatchmaking.

  • TIMED_OUT -- The matchmaking request was not successful within the duration specified in the matchmaking configuration.

Matchmaking requests that fail to successfully complete (statuses FAILED, CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket IDs.

+ ///

Current status of the matchmaking request.

  • QUEUED -- The matchmaking request has been received and is currently waiting to be processed.

  • SEARCHING -- The matchmaking request is currently being processed.

  • REQUIRES_ACCEPTANCE -- A match has been proposed and the players must accept the match (see AcceptMatch). This status is used only with requests that use a matchmaking configuration with a player acceptance requirement.

  • PLACING -- The FlexMatch engine has matched players and is in the process of placing a new game session for the match.

  • COMPLETED -- Players have been matched and a game session is ready to host the players. A ticket in this state contains the necessary connection information for players.

  • FAILED -- The matchmaking request was not completed.

  • CANCELLED -- The matchmaking request was canceled. This may be the result of a call to StopMatchmaking or a proposed match that one or more players failed to accept.

  • TIMED_OUT -- The matchmaking request was not successful within the duration specified in the matchmaking configuration.

Matchmaking requests that fail to successfully complete (statuses FAILED, CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket IDs.
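A sketch of the polling loop this status machine implies (ticket ID, region, and the 10-second interval are placeholders; this assumes the blocking .sync() helper on RusotoFuture):

    use rusoto_core::Region;
    use rusoto_gamelift::{DescribeMatchmakingInput, GameLift, GameLiftClient};
    use std::{thread, time::Duration};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        loop {
            let out = client
                .describe_matchmaking(DescribeMatchmakingInput {
                    ticket_ids: vec!["my-ticket-id".to_string()],
                })
                .sync()
                .expect("DescribeMatchmaking failed");
            let status = out
                .ticket_list
                .as_ref()
                .and_then(|tickets| tickets.first())
                .and_then(|ticket| ticket.status.clone());
            match status.as_ref().map(|s| s.as_str()) {
                // Connection info is now available on the ticket.
                Some("COMPLETED") => break,
                // Terminal states: resubmit with a new ticket ID if desired.
                Some("FAILED") | Some("CANCELLED") | Some("TIMED_OUT") => break,
                // QUEUED, SEARCHING, REQUIRES_ACCEPTANCE, PLACING, ...
                _ => thread::sleep(Duration::from_secs(10)),
            }
        }
    }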

#[serde(rename = "Status")]
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
@@ -2126,7 +2157,7 @@ pub struct MatchmakingTicket {
///

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PlacedPlayerSession {
    ///

Unique identifier for a player that is associated with this player session.

#[serde(rename = "PlayerId")] @@ -2191,12 +2222,15 @@ pub struct PlayerLatencyPolicy { ///

Properties describing a player session. Player session objects are created either by creating a player session for a specific game session, or as part of a game session placement. A player session represents either a player reservation for a game session (status RESERVED) or actual player activity in a game session (status ACTIVE). A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

When a player disconnects, the player session status changes to COMPLETED. Once the session ends, the player session object is retained for 30 days and then removed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PlayerSession {
    ///

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").

#[serde(rename = "CreationTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub creation_time: Option<f64>,
+#[serde(rename = "DnsName")]
+#[serde(skip_serializing_if = "Option::is_none")]
+pub dns_name: Option<String>,
///

Unique identifier for a fleet that the player's game session is running on.

#[serde(rename = "FleetId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2279,7 +2313,7 @@ pub struct PutScalingPolicyInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PutScalingPolicyOutput {
    ///

Descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

#[serde(rename = "Name")] @@ -2297,7 +2331,7 @@ pub struct RequestUploadCredentialsInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RequestUploadCredentialsOutput {
    ///

Amazon S3 path and key, identifying where the game build files are stored.

#[serde(rename = "StorageLocation")] @@ -2319,7 +2353,7 @@ pub struct ResolveAliasInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ResolveAliasOutput {
    ///

Fleet identifier that is associated with the requested alias.

#[serde(rename = "FleetId")] @@ -2340,7 +2374,7 @@ pub struct ResourceCreationLimitPolicy { pub policy_period_in_minutes: Option, } -///

Routing configuration for a fleet alias.

+///

Routing configuration for a fleet alias.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RoutingStrategy { ///

Unique identifier for a fleet that the alias points to.

@@ -2397,7 +2431,7 @@ pub struct S3Location { ///

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ScalingPolicy {
    ///

Comparison operator to use when measuring a metric against the threshold value.

#[serde(rename = "ComparisonOperator")] @@ -2447,7 +2481,7 @@ pub struct ScalingPolicy { ///

Properties describing a Realtime script.

Related operations

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Script {
    ///

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").

#[serde(rename = "CreationTime")] @@ -2505,7 +2539,7 @@ pub struct SearchGameSessionsInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct SearchGameSessionsOutput {
    ///

Collection of objects containing game session properties for each session matching the request.

#[serde(rename = "GameSessions")]
@@ -2543,7 +2577,7 @@ pub struct StartFleetActionsInput {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StartFleetActionsOutput {}
///

Represents the input for a request action.

@@ -2582,7 +2616,7 @@ pub struct StartGameSessionPlacementInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StartGameSessionPlacementOutput {
    ///

Object that describes the newly created game session placement. This object includes all the information provided in the request, as well as start/end time stamps and placement status.

#[serde(rename = "GameSessionPlacement")] @@ -2610,7 +2644,7 @@ pub struct StartMatchBackfillInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StartMatchBackfillOutput {
    ///

Ticket representing the backfill matchmaking request. This object includes the information in the request, ticket status, and match results as generated during the matchmaking process.

#[serde(rename = "MatchmakingTicket")] @@ -2635,7 +2669,7 @@ pub struct StartMatchmakingInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StartMatchmakingOutput {
    ///

Ticket representing the matchmaking request. This object includes the information included in the request, ticket status, and match results as generated during the matchmaking process.

#[serde(rename = "MatchmakingTicket")]
@@ -2654,7 +2688,7 @@ pub struct StopFleetActionsInput {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StopFleetActionsOutput {}
///

Represents the input for a request action.

@@ -2667,7 +2701,7 @@ pub struct StopGameSessionPlacementInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StopGameSessionPlacementOutput {
    ///

Object that describes the canceled game session placement, with CANCELLED status and an end time stamp.

#[serde(rename = "GameSessionPlacement")]
@@ -2684,7 +2718,7 @@ pub struct StopMatchmakingInput {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StopMatchmakingOutput {}
///

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.
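As a sketch of how the target configuration is supplied (fleet ID and target value are placeholders, PercentAvailableGameSessions is the metric AWS documents for target tracking, and this assumes the input struct derives Default like the other inputs in this file):

    use rusoto_core::Region;
    use rusoto_gamelift::{GameLift, GameLiftClient, PutScalingPolicyInput, TargetConfiguration};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        // Keep a 10% buffer of available game sessions on the fleet.
        let input = PutScalingPolicyInput {
            name: "available-sessions-buffer".to_string(),
            fleet_id: "fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912".to_string(),
            metric_name: "PercentAvailableGameSessions".to_string(),
            policy_type: Some("TargetBased".to_string()),
            target_configuration: Some(TargetConfiguration { target_value: 10.0 }),
            ..Default::default()
        };
        match client.put_scaling_policy(input).sync() {
            Ok(out) => println!("created policy: {:?}", out.name),
            Err(err) => eprintln!("PutScalingPolicy failed: {}", err),
        }
    }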

@@ -2717,7 +2751,7 @@ pub struct UpdateAliasInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateAliasOutput {
    ///

Object that contains the updated alias configuration.

#[serde(rename = "Alias")] @@ -2743,7 +2777,7 @@ pub struct UpdateBuildInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateBuildOutput {
    ///

Object that contains the updated build record.

#[serde(rename = "Build")] @@ -2781,7 +2815,7 @@ pub struct UpdateFleetAttributesInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFleetAttributesOutput {
    ///

Unique identifier for a fleet that was updated.

#[serde(rename = "FleetId")] @@ -2811,7 +2845,7 @@ pub struct UpdateFleetCapacityInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFleetCapacityOutput {
    ///

Unique identifier for a fleet that was updated.

#[serde(rename = "FleetId")] @@ -2837,7 +2871,7 @@ pub struct UpdateFleetPortSettingsInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFleetPortSettingsOutput {
    ///

Unique identifier for a fleet that was updated.

#[serde(rename = "FleetId")] @@ -2871,7 +2905,7 @@ pub struct UpdateGameSessionInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateGameSessionOutput {
    ///

Object that contains the updated game session metadata.

#[serde(rename = "GameSession")] @@ -2901,7 +2935,7 @@ pub struct UpdateGameSessionQueueInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateGameSessionQueueOutput {
    ///

Object that describes the newly updated game session queue.

#[serde(rename = "GameSessionQueue")] @@ -2912,7 +2946,7 @@ pub struct UpdateGameSessionQueueOutput { ///

Represents the input for a request action.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateMatchmakingConfigurationInput { - ///

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

+ ///

Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

#[serde(rename = "AcceptanceRequired")] #[serde(skip_serializing_if = "Option::is_none")] pub acceptance_required: Option, @@ -2924,7 +2958,11 @@ pub struct UpdateMatchmakingConfigurationInput { #[serde(rename = "AdditionalPlayerCount")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_player_count: Option, - ///

Information to attached to all events related to the matchmaking configuration.

+ ///

Method used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
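For example, switching an existing configuration to automatic backfill through the new field added just below might look like this (configuration name and region are placeholders; the input struct derives Default, per the derive line shown earlier in this hunk):

    use rusoto_core::Region;
    use rusoto_gamelift::{GameLift, GameLiftClient, UpdateMatchmakingConfigurationInput};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let input = UpdateMatchmakingConfigurationInput {
            name: "my-matchmaker".to_string(),
            backfill_mode: Some("AUTOMATIC".to_string()),
            ..Default::default()
        };
        if let Err(err) = client.update_matchmaking_configuration(input).sync() {
            eprintln!("UpdateMatchmakingConfiguration failed: {}", err);
        }
    }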

+#[serde(rename = "BackfillMode")]
+#[serde(skip_serializing_if = "Option::is_none")]
+pub backfill_mode: Option<String>,
+///

Information to add to all events related to the matchmaking configuration.

#[serde(rename = "CustomEventData")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_event_data: Option, @@ -2940,7 +2978,7 @@ pub struct UpdateMatchmakingConfigurationInput { #[serde(rename = "GameSessionData")] #[serde(skip_serializing_if = "Option::is_none")] pub game_session_data: Option, - ///

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

+ ///

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

#[serde(rename = "GameSessionQueueArns")] #[serde(skip_serializing_if = "Option::is_none")] pub game_session_queue_arns: Option>, @@ -2951,7 +2989,7 @@ pub struct UpdateMatchmakingConfigurationInput { #[serde(rename = "NotificationTarget")] #[serde(skip_serializing_if = "Option::is_none")] pub notification_target: Option, - ///

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.

+ ///

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

#[serde(rename = "RequestTimeoutSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub request_timeout_seconds: Option, @@ -2963,7 +3001,7 @@ pub struct UpdateMatchmakingConfigurationInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateMatchmakingConfigurationOutput {
    ///

Object that describes the updated matchmaking configuration.

#[serde(rename = "Configuration")] @@ -2984,7 +3022,7 @@ pub struct UpdateRuntimeConfigurationInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateRuntimeConfigurationOutput {
    ///

The run-time configuration currently in force. If the update was successful, this object matches the one in the request.

#[serde(rename = "RuntimeConfiguration")]
@@ -3021,7 +3059,7 @@ pub struct UpdateScriptInput {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateScriptOutput {
    ///

The newly created script record with a unique script ID. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controlled by the Amazon GameLift service.

#[serde(rename = "Script")] @@ -3039,9 +3077,9 @@ pub struct ValidateMatchmakingRuleSetInput { ///

Represents the returned data in response to a request action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ValidateMatchmakingRuleSetOutput {
-    ///

Response indicating whether or not the rule set is valid.

+ ///

Response indicating whether the rule set is valid.

#[serde(rename = "Valid")] #[serde(skip_serializing_if = "Option::is_none")] pub valid: Option, @@ -3049,7 +3087,7 @@ pub struct ValidateMatchmakingRuleSetOutput { ///

Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct VpcPeeringAuthorization {
    ///

Time stamp indicating when this authorization was issued. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057").

#[serde(rename = "CreationTime")] @@ -3075,7 +3113,7 @@ pub struct VpcPeeringAuthorization { ///

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct VpcPeeringConnection {
    ///

Unique identifier for a fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

#[serde(rename = "FleetId")] @@ -3105,7 +3143,7 @@ pub struct VpcPeeringConnection { ///

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (see VpcPeeringConnectionStateReason). Connection status information is also communicated as a fleet Event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct VpcPeeringConnectionStatus {
    ///

Code indicating the status of a VPC peering connection.

#[serde(rename = "Code")] @@ -7280,7 +7318,7 @@ impl Error for ValidateMatchmakingRuleSetError { } /// Trait representing the capabilities of the Amazon GameLift API. Amazon GameLift clients implement this trait. pub trait GameLift { - ///

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

+ ///

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations
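A sketch of registering one player's acceptance (IDs and region are placeholders; this assumes the blocking .sync() helper on RusotoFuture):

    use rusoto_core::Region;
    use rusoto_gamelift::{AcceptMatchInput, GameLift, GameLiftClient};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let input = AcceptMatchInput {
            ticket_id: "my-ticket-id".to_string(),
            player_ids: vec!["player-1".to_string()],
            acceptance_type: "ACCEPT".to_string(), // or "REJECT"
        };
        if let Err(err) = client.accept_match(input).sync() {
            eprintln!("AcceptMatch failed: {}", err);
        }
    }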

fn accept_match( &self, input: AcceptMatchInput, @@ -7316,13 +7354,13 @@ pub trait GameLift { input: CreateGameSessionQueueInput, ) -> RusotoFuture; - ///

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.

Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

+ ///

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration. Since notifications promise only "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

Learn more

Design a FlexMatch Matchmaker

Setting up Notifications for Matchmaking

Related operations
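A sketch of the minimum configuration described above, plus an SNS topic for notifications (all names and ARNs are placeholders, and this assumes the input struct derives Default like the other inputs in this file):

    use rusoto_core::Region;
    use rusoto_gamelift::{CreateMatchmakingConfigurationInput, GameLift, GameLiftClient};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let input = CreateMatchmakingConfigurationInput {
            name: "my-matchmaker".to_string(),
            rule_set_name: "simple_game".to_string(),
            game_session_queue_arns: vec![
                "arn:aws:gamelift:us-east-1:123456789012:gamesessionqueue/my-queue".to_string(),
            ],
            request_timeout_seconds: 120,
            acceptance_required: true,
            acceptance_timeout_seconds: Some(30),
            // Optional SNS topic for "best effort" status notifications.
            notification_target: Some(
                "arn:aws:sns:us-east-1:123456789012:matchmaking-events".to_string(),
            ),
            ..Default::default()
        };
        match client.create_matchmaking_configuration(input).sync() {
            Ok(out) => println!("configuration: {:?}", out.configuration),
            Err(err) => eprintln!("CreateMatchmakingConfiguration failed: {}", err),
        }
    }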

fn create_matchmaking_configuration( &self, input: CreateMatchmakingConfigurationInput, ) -> RusotoFuture; - ///

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they will be used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

+ ///

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations
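A sketch pairing the two calls described above, validating the body before creating the rule set (names and the rule set content are placeholders):

    use rusoto_core::Region;
    use rusoto_gamelift::{
        CreateMatchmakingRuleSetInput, GameLift, GameLiftClient, ValidateMatchmakingRuleSetInput,
    };

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let body = r#"{ "name": "simple_game",
            "teams": [{ "name": "players", "minPlayers": 2, "maxPlayers": 4 }] }"#;
        // Rule sets cannot be edited later, so check the syntax first.
        let valid = client
            .validate_matchmaking_rule_set(ValidateMatchmakingRuleSetInput {
                rule_set_body: body.to_string(),
            })
            .sync()
            .map(|out| out.valid.unwrap_or(false))
            .unwrap_or(false);
        if valid {
            let result = client
                .create_matchmaking_rule_set(CreateMatchmakingRuleSetInput {
                    name: "simple_game".to_string(),
                    rule_set_body: body.to_string(),
                })
                .sync();
            println!("created: {:?}", result.map(|out| out.rule_set));
        }
    }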

fn create_matchmaking_rule_set( &self, input: CreateMatchmakingRuleSetInput, @@ -7364,7 +7402,7 @@ pub trait GameLift { ///

Deletes a build. This action permanently deletes the build record and any uploaded build files.

To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

Learn more

Working with Builds

Related operations

fn delete_build(&self, input: DeleteBuildInput) -> RusotoFuture<(), DeleteBuildError>; - ///

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

+ ///

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

fn delete_fleet(&self, input: DeleteFleetInput) -> RusotoFuture<(), DeleteFleetError>; ///

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

@@ -7373,7 +7411,7 @@ pub trait GameLift { input: DeleteGameSessionQueueInput, ) -> RusotoFuture; - ///

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

+ ///

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related operations

fn delete_matchmaking_configuration( &self, input: DeleteMatchmakingConfigurationInput, @@ -7394,7 +7432,7 @@ pub trait GameLift { ///

Deletes a Realtime script. This action permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

fn delete_script(&self, input: DeleteScriptInput) -> RusotoFuture<(), DeleteScriptError>; - ///

Cancels a pending VPC peering authorization for the specified VPC. If the authorization has already been used to create a peering connection, call DeleteVpcPeeringConnection to remove the connection.

+ ///

Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.

fn delete_vpc_peering_authorization( &self, input: DeleteVpcPeeringAuthorizationInput, @@ -7484,13 +7522,13 @@ pub trait GameLift { input: DescribeInstancesInput, ) -> RusotoFuture; - ///

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

+ ///

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

fn describe_matchmaking( &self, input: DescribeMatchmakingInput, ) -> RusotoFuture; - ///

Retrieves the details of FlexMatch matchmaking configurations. with this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

+ ///

Retrieves the details of FlexMatch matchmaking configurations. With this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

fn describe_matchmaking_configurations( &self, input: DescribeMatchmakingConfigurationsInput, @@ -7609,13 +7647,13 @@ pub trait GameLift { input: StartGameSessionPlacementInput, ) -> RusotoFuture; - ///

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail how to set up backfilling, see Backfill Existing Games with FlexMatch.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

+ ///

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations
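A sketch of a backfill request for a session with one remaining player (ARNs, IDs, and names are placeholders; Player is assumed to derive Default like the other generated structs):

    use rusoto_core::Region;
    use rusoto_gamelift::{GameLift, GameLiftClient, Player, StartMatchBackfillInput};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        // Describe every player currently in the game session so the
        // matchmaker only fills the genuinely open slots.
        let current_players = vec![Player {
            player_id: Some("player-1".to_string()),
            team: Some("players".to_string()),
            ..Default::default()
        }];
        let input = StartMatchBackfillInput {
            configuration_name: "my-matchmaker".to_string(),
            game_session_arn:
                "arn:aws:gamelift:us-east-1::gamesession/fleet-a1234567/gsess-abc".to_string(),
            players: current_players,
            ticket_id: Some("backfill-ticket-1".to_string()),
        };
        match client.start_match_backfill(input).sync() {
            Ok(out) => println!("backfill ticket: {:?}", out.matchmaking_ticket),
            Err(err) => eprintln!("StartMatchBackfill failed: {}", err),
        }
    }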

fn start_match_backfill( &self, input: StartMatchBackfillInput, ) -> RusotoFuture; - ///

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

+ ///

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling as a backup to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations
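A sketch of step 1 of this flow, submitting a single player with one numeric attribute (all names and values are placeholders and must line up with the rule set; Player and AttributeValue are assumed to derive Default):

    use rusoto_core::Region;
    use rusoto_gamelift::{AttributeValue, GameLift, GameLiftClient, Player, StartMatchmakingInput};
    use std::collections::HashMap;

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let mut attributes = HashMap::new();
        attributes.insert(
            "skill".to_string(),
            AttributeValue { n: Some(42.0), ..Default::default() },
        );
        let input = StartMatchmakingInput {
            configuration_name: "my-matchmaker".to_string(),
            players: vec![Player {
                player_id: Some("player-1".to_string()),
                player_attributes: Some(attributes),
                ..Default::default()
            }],
            ticket_id: Some("my-ticket-id".to_string()),
        };
        match client.start_matchmaking(input).sync() {
            Ok(out) => println!("ticket: {:?}", out.matchmaking_ticket),
            Err(err) => eprintln!("StartMatchmaking failed: {}", err),
        }
    }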

fn start_matchmaking( &self, input: StartMatchmakingInput, @@ -7633,7 +7671,7 @@ pub trait GameLift { input: StopGameSessionPlacementInput, ) -> RusotoFuture; - ///

Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

+ ///

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the action is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations
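A sketch of cancelling a ticket (for the auto-backfill case, the ticket ID comes from the game session's MatchmakerData; here it is a placeholder):

    use rusoto_core::Region;
    use rusoto_gamelift::{GameLift, GameLiftClient, StopMatchmakingInput};

    fn main() {
        let client = GameLiftClient::new(Region::UsEast1);
        let input = StopMatchmakingInput {
            ticket_id: "my-ticket-id".to_string(),
        };
        if let Err(err) = client.stop_matchmaking(input).sync() {
            eprintln!("StopMatchmaking failed: {}", err);
        }
    }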

fn stop_matchmaking( &self, input: StopMatchmakingInput, @@ -7681,7 +7719,7 @@ pub trait GameLift { input: UpdateGameSessionQueueInput, ) -> RusotoFuture; - ///

Updates settings for a FlexMatch matchmaking configuration. To update settings, specify the configuration name to be updated and provide the new settings.

+ ///

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations
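The constructor refactor in the hunk just below routes both existing constructors through a new public new_with_client, which also lets callers inject a preconfigured rusoto_core::Client. A minimal sketch of the new entry point:

    use rusoto_core::{Client, Region};
    use rusoto_gamelift::GameLiftClient;

    fn main() {
        // Reuse the process-wide shared Client rather than building a new one.
        let gamelift = GameLiftClient::new_with_client(Client::shared(), Region::UsWest2);
        let _ = gamelift;
    }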

fn update_matchmaking_configuration(
    &self,
    input: UpdateMatchmakingConfigurationInput,
) -> RusotoFuture;
@@ -7717,10 +7755,7 @@ impl GameLiftClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> GameLiftClient {
-        GameLiftClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with<P, D>(
        credentials_provider: P,
        request_dispatcher: D,
        region: region::Region,
    ) -> GameLiftClient
    where
        P: ProvideAwsCredentials + Send + Sync + 'static,
        P::Future: Send,
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        GameLiftClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> GameLiftClient {
+        GameLiftClient { client, region }
    }
}

impl GameLift for GameLiftClient {
-    ///

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

+ ///

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

fn accept_match( &self, input: AcceptMatchInput, @@ -7915,7 +7954,7 @@ impl GameLift for GameLiftClient { }) } - ///

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.

Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

+ ///

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration. Since notifications promise only "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

Learn more

Design a FlexMatch Matchmaker

Setting up Notifications for Matchmaking

Related operations

fn create_matchmaking_configuration( &self, input: CreateMatchmakingConfigurationInput, @@ -7942,7 +7981,7 @@ impl GameLift for GameLiftClient { }) } - ///

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they will be used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

+ ///

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

fn create_matchmaking_rule_set( &self, input: CreateMatchmakingRuleSetInput, @@ -8151,7 +8190,7 @@ impl GameLift for GameLiftClient { }) } - ///

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

+ ///

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations
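A minimal sketch of that scale-down-then-delete sequence, assuming the generated inputs in this crate (the fleet ID is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_gamelift::{DeleteFleetInput, GameLift, GameLiftClient, UpdateFleetCapacityInput};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    let fleet_id = "fleet-00000000-0000-0000-0000-000000000000".to_owned();

    // A fleet can only be deleted once its desired capacity is zero.
    client
        .update_fleet_capacity(UpdateFleetCapacityInput {
            fleet_id: fleet_id.clone(),
            desired_instances: Some(0),
            ..Default::default()
        })
        .sync()
        .expect("failed to scale the fleet down");

    // delete_fleet returns RusotoFuture<(), DeleteFleetError>, so success carries no payload.
    client
        .delete_fleet(DeleteFleetInput { fleet_id })
        .sync()
        .expect("failed to delete the fleet");
}
```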

fn delete_fleet(&self, input: DeleteFleetInput) -> RusotoFuture<(), DeleteFleetError> { let mut request = SignedRequest::new("POST", "gamelift", &self.region, "/"); @@ -8202,7 +8241,7 @@ impl GameLift for GameLiftClient { }) } - ///

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

+ ///

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related operations
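For illustration, a one-call sketch (the configuration name is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_gamelift::{DeleteMatchmakingConfigurationInput, GameLift, GameLiftClient};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    let result = client
        .delete_matchmaking_configuration(DeleteMatchmakingConfigurationInput {
            name: "example-matchmaker".to_owned(),
        })
        .sync();
    // The call fails while active matchmaking tickets still reference the configuration.
    println!("deleted: {}", result.is_ok());
}
```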

fn delete_matchmaking_configuration( &self, input: DeleteMatchmakingConfigurationInput, @@ -8303,7 +8342,7 @@ impl GameLift for GameLiftClient { }) } - ///

Cancels a pending VPC peering authorization for the specified VPC. If the authorization has already been used to create a peering connection, call DeleteVpcPeeringConnection to remove the connection.

+ ///

Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.

fn delete_vpc_peering_authorization( &self, input: DeleteVpcPeeringAuthorizationInput, @@ -8708,7 +8747,7 @@ impl GameLift for GameLiftClient { }) } - ///

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

+ ///

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations
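A polling sketch along those lines; the ticket ID is a placeholder, and the `ticket_list` and `status` field names are assumed from the AWS API shape.

```rust
use rusoto_core::Region;
use rusoto_gamelift::{DescribeMatchmakingInput, GameLift, GameLiftClient};
use std::{thread, time::Duration};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    loop {
        let out = client
            .describe_matchmaking(DescribeMatchmakingInput {
                // Up to 10 ticket IDs can be requested per call.
                ticket_ids: vec!["example-ticket-id".to_owned()],
            })
            .sync()
            .expect("describe_matchmaking failed");
        if let Some(ticket) = out.ticket_list.and_then(|mut list| list.pop()) {
            println!("status: {:?}", ticket.status);
            if ticket.status.as_ref().map(|s| s == "COMPLETED").unwrap_or(false) {
                break; // connection info is now available on the ticket
            }
        }
        thread::sleep(Duration::from_secs(10)); // poll at most once every 10 seconds
    }
}
```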

fn describe_matchmaking( &self, input: DescribeMatchmakingInput, @@ -8736,7 +8775,7 @@ impl GameLift for GameLiftClient { }) } - ///

Retrieves the details of FlexMatch matchmaking configurations. with this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

+ ///

Retrieves the details of FlexMatch matchmaking configurations. With this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations
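For example, option (3), retrieving all configurations that use a given rule set, might look like this sketch (output field names assumed from the AWS API shape):

```rust
use rusoto_core::Region;
use rusoto_gamelift::{DescribeMatchmakingConfigurationsInput, GameLift, GameLiftClient};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    let out = client
        .describe_matchmaking_configurations(DescribeMatchmakingConfigurationsInput {
            rule_set_name: Some("example-rule-set".to_owned()),
            ..Default::default() // or set `limit`/`next_token` to page through results
        })
        .sync()
        .expect("describe_matchmaking_configurations failed");
    for config in out.configurations.unwrap_or_default() {
        println!("{:?}", config.name);
    }
}
```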

fn describe_matchmaking_configurations( &self, input: DescribeMatchmakingConfigurationsInput, @@ -9294,7 +9333,7 @@ impl GameLift for GameLiftClient { }) } - ///

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail how to set up backfilling, see Backfill Existing Games with FlexMatch.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

+ ///

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations
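A rough sketch of such a backfill request; the ARN, names, and player data are placeholders, and a real request should describe every player currently in the session so only genuinely open slots are filled.

```rust
use rusoto_core::Region;
use rusoto_gamelift::{GameLift, GameLiftClient, Player, StartMatchBackfillInput};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    // Describe the players already in the game session, including team assignments.
    let current_player = Player {
        player_id: Some("player-1".to_owned()),
        team: Some("red".to_owned()),
        ..Default::default()
    };
    let out = client
        .start_match_backfill(StartMatchBackfillInput {
            ticket_id: Some("example-backfill-ticket".to_owned()),
            configuration_name: "example-matchmaker".to_owned(),
            game_session_arn: "arn:aws:gamelift:us-east-1::gamesession/example".to_owned(),
            players: vec![current_player],
        })
        .sync()
        .expect("start_match_backfill failed");
    // The returned ticket starts in QUEUED, like a regular matchmaking ticket.
    println!("{:?}", out);
}
```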

fn start_match_backfill( &self, input: StartMatchBackfillInput, @@ -9323,7 +9362,7 @@ impl GameLift for GameLiftClient { }) } - ///

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

+ ///

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling as a backup to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations
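Tying the steps above together, a hedged sketch of submitting a ticket; the attribute names and values are placeholders and must line up with what the configuration's rule set evaluates, and the `AttributeValue` field layout is assumed from the AWS API shape.

```rust
use rusoto_core::Region;
use rusoto_gamelift::{AttributeValue, GameLift, GameLiftClient, Player, StartMatchmakingInput};
use std::collections::HashMap;

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    let mut attrs = HashMap::new();
    attrs.insert(
        "skill".to_owned(),
        AttributeValue { n: Some(42.0), ..Default::default() }, // numeric attribute
    );
    let out = client
        .start_matchmaking(StartMatchmakingInput {
            ticket_id: Some("example-ticket-id".to_owned()), // unique per request
            configuration_name: "example-matchmaker".to_owned(),
            players: vec![Player {
                player_id: Some("player-1".to_owned()),
                player_attributes: Some(attrs),
                ..Default::default()
            }],
        })
        .sync()
        .expect("start_matchmaking failed");
    // The new ticket starts in QUEUED; track it by polling or via SNS as described above.
    println!("{:?}", out);
}
```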

fn start_matchmaking( &self, input: StartMatchmakingInput, @@ -9407,7 +9446,7 @@ impl GameLift for GameLiftClient { }) } - ///

Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

+ ///

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the action is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations
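A minimal sketch (the ticket ID is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_gamelift::{GameLift, GameLiftClient, StopMatchmakingInput};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    // Works for matchmaking and backfill tickets alike; on success the
    // ticket moves to CANCELLED and the service returns an empty struct.
    let result = client
        .stop_matchmaking(StopMatchmakingInput {
            ticket_id: "example-ticket-id".to_owned(),
        })
        .sync();
    println!("cancelled: {}", result.is_ok());
}
```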

fn stop_matchmaking( &self, input: StopMatchmakingInput, @@ -9633,7 +9672,7 @@ impl GameLift for GameLiftClient { }) } - ///

Updates settings for a FlexMatch matchmaking configuration. To update settings, specify the configuration name to be updated and provide the new settings.

+ ///

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations
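For example, a sketch that attaches an SNS notification target to an existing configuration (the topic ARN is a placeholder; fields left at their defaults here are assumed to be treated as "not provided" by the API):

```rust
use rusoto_core::Region;
use rusoto_gamelift::{GameLift, GameLiftClient, UpdateMatchmakingConfigurationInput};

fn main() {
    let client = GameLiftClient::new(Region::UsEast1);
    let result = client
        .update_matchmaking_configuration(UpdateMatchmakingConfigurationInput {
            name: "example-matchmaker".to_owned(),
            notification_target: Some(
                "arn:aws:sns:us-east-1:111122223333:example-matchmaking-topic".to_owned(),
            ),
            ..Default::default()
        })
        .sync();
    // Only matches and game sessions created after the update are affected.
    println!("updated: {}", result.is_ok());
}
```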

fn update_matchmaking_configuration( &self, input: UpdateMatchmakingConfigurationInput, diff --git a/rusoto/services/glacier/Cargo.toml b/rusoto/services/glacier/Cargo.toml index e60c1700268..d421dbdc8e2 100644 --- a/rusoto/services/glacier/Cargo.toml +++ b/rusoto/services/glacier/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_glacier" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/glacier/README.md b/rusoto/services/glacier/README.md index 616ff6b565a..9ac36127aec 100644 --- a/rusoto/services/glacier/README.md +++ b/rusoto/services/glacier/README.md @@ -23,9 +23,16 @@ To use `rusoto_glacier` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_glacier = "0.40.0" +rusoto_glacier = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/glacier/src/custom/custom_tests.rs b/rusoto/services/glacier/src/custom/custom_tests.rs index aaa51d7941b..d12de8d50a4 100644 --- a/rusoto/services/glacier/src/custom/custom_tests.rs +++ b/rusoto/services/glacier/src/custom/custom_tests.rs @@ -2,8 +2,8 @@ extern crate rusoto_mock; use crate::generated::*; -use rusoto_core::Region; use self::rusoto_mock::*; +use rusoto_core::Region; #[test] fn test_initiate_multipart_part_response() { @@ -40,5 +40,9 @@ fn test_upload_multipart_part_response() { .upload_multipart_part(upload_part_copy_req) .sync() .expect("Should parse empty body"); - assert_eq!(result.checksum.unwrap(), "42", "Should handle checksum in response"); -} \ No newline at end of file + assert_eq!( + result.checksum.unwrap(), + "42", + "Should handle checksum in response" + ); +} diff --git a/rusoto/services/glacier/src/custom/mod.rs b/rusoto/services/glacier/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/glacier/src/custom/mod.rs +++ b/rusoto/services/glacier/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/glacier/src/generated.rs b/rusoto/services/glacier/src/generated.rs index 2d3e0afa06c..c4cb1c8cc06 100644 --- a/rusoto/services/glacier/src/generated.rs +++ b/rusoto/services/glacier/src/generated.rs @@ -9,26 +9,25 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; -///

Provides options to abort a multipart upload identified by the upload ID.

For information about the underlying REST API, see Abort Multipart Upload. For conceptual information, see Working with Archives in Amazon Glacier.

+///

Provides options to abort a multipart upload identified by the upload ID.

For information about the underlying REST API, see Abort Multipart Upload. For conceptual information, see Working with Archives in Amazon S3 Glacier.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct AbortMultipartUploadInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The upload ID of the multipart upload to delete.

@@ -57,7 +56,7 @@ pub struct AddTagsToVaultInput { #[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -65,15 +64,15 @@ pub struct AddTagsToVaultInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

For information about the underlying REST API, see Upload Archive. For conceptual information, see Working with Archives in Amazon Glacier.

+///

Contains the Amazon S3 Glacier response to your request.

For information about the underlying REST API, see Upload Archive. For conceptual information, see Working with Archives in Amazon S3 Glacier.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArchiveCreationOutput { ///

The ID of the archive. This value is also included as part of the location.

#[serde(rename = "archiveId")] #[serde(skip_serializing_if = "Option::is_none")] pub archive_id: Option, - ///

The checksum of the archive computed by Amazon Glacier.

+ ///

The checksum of the archive computed by Amazon S3 Glacier.

#[serde(rename = "checksum")] #[serde(skip_serializing_if = "Option::is_none")] pub checksum: Option, @@ -137,17 +136,17 @@ pub struct CSVOutput { pub record_delimiter: Option, } -///

Provides options to complete a multipart upload operation. This informs Amazon Glacier that all the archive parts have been uploaded and Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource.

+///

Provides options to complete a multipart upload operation. This informs Amazon S3 Glacier (Glacier) that all the archive parts have been uploaded and Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CompleteMultipartUploadInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The total size, in bytes, of the entire archive. This value should be the sum of all the sizes of the individual parts that you uploaded.

#[serde(rename = "archiveSize")] #[serde(skip_serializing_if = "Option::is_none")] pub archive_size: Option, - ///

The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails.

+ ///

The SHA256 tree hash of the entire archive. It is the tree hash of the SHA256 tree hashes of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon S3 Glacier (Glacier), Glacier returns an error and the request fails.

#[serde(rename = "checksum")] #[serde(skip_serializing_if = "Option::is_none")] pub checksum: Option, @@ -176,7 +175,7 @@ pub struct CompleteVaultLockInput { ///

Provides options to create a vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateVaultInput { - ///

The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -184,9 +183,9 @@ pub struct CreateVaultInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateVaultOutput { ///

The URI of the vault that was created.

#[serde(rename = "location")] @@ -216,10 +215,10 @@ pub struct DataRetrievalRule { pub strategy: Option, } -///

Provides options for deleting an archive from an Amazon Glacier vault.

+///

Provides options for deleting an archive from an Amazon S3 Glacier vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteArchiveInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The ID of the archive to delete.

@@ -233,7 +232,7 @@ pub struct DeleteArchiveInput { ///

DeleteVaultAccessPolicy input.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteVaultAccessPolicyInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -241,10 +240,10 @@ pub struct DeleteVaultAccessPolicyInput { pub vault_name: String, } -///

Provides options for deleting a vault from Amazon Glacier.

+///

Provides options for deleting a vault from Amazon S3 Glacier.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteVaultInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -255,7 +254,7 @@ pub struct DeleteVaultInput { ///

Provides options for deleting a vault notification configuration from an Amazon Glacier vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteVaultNotificationsInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -266,7 +265,7 @@ pub struct DeleteVaultNotificationsInput { ///

Provides options for retrieving a job description.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeJobInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The ID of the job to describe.

@@ -280,7 +279,7 @@ pub struct DescribeJobInput { ///

Provides options for retrieving metadata for a specific vault in Amazon Glacier.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeVaultInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -288,15 +287,15 @@ pub struct DescribeVaultInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeVaultOutput { ///

The Universal Coordinated Time (UTC) date when the vault was created. This value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z.

#[serde(rename = "CreationDate")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_date: Option, - ///

The Universal Coordinated Time (UTC) date when Amazon Glacier completed the last vault inventory. This value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z.

+ ///

The Universal Coordinated Time (UTC) date when Amazon S3 Glacier completed the last vault inventory. This value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z.

#[serde(rename = "LastInventoryDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_inventory_date: Option, @@ -343,9 +342,9 @@ pub struct GetDataRetrievalPolicyInput { pub account_id: String, } -///

Contains the Amazon Glacier response to the GetDataRetrievalPolicy request.

+///

Contains the Amazon S3 Glacier response to the GetDataRetrievalPolicy request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataRetrievalPolicyOutput { ///

Contains the returned data retrieval policy in JSON format.

#[serde(rename = "Policy")] @@ -353,16 +352,16 @@ pub struct GetDataRetrievalPolicyOutput { pub policy: Option, } -///

Provides options for downloading output of an Amazon Glacier job.

+///

Provides options for downloading output of an Amazon S3 Glacier job.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetJobOutputInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The job ID whose data is downloaded.

#[serde(rename = "jobId")] pub job_id: String, - ///

The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify the range as bytes=0-1048575. By default, this operation downloads the entire output.

If the job output is large, then you can use a range to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

  1. Download a 128 MB chunk of output by specifying the appropriate byte range. Verify that all 128 MB of data was received.

  2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

  3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

  4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.

+ ///

The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify the range as bytes=0-1048575. By default, this operation downloads the entire output.

If the job output is large, then you can use a range to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

  1. Download a 128 MB chunk of output by specifying the appropriate byte range. Verify that all 128 MB of data was received.

  2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

  3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

  4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon S3 Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.
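The checksum arithmetic in step 4 can be sketched as follows. This is a rough illustration using the sha2 crate (the dependency version and helper names are assumptions), and it relies on each chunk being an aligned power-of-two multiple of 1 MB, as in the eight 128 MB chunks above, so that per-chunk tree hashes combine cleanly into the archive's tree hash.

```rust
// Assumed dependency for this sketch: sha2 = "0.8"
use sha2::{Digest, Sha256};

/// Fold per-chunk SHA256 tree hashes pairwise, level by level, until a
/// single hash remains: the tree hash of the entire job output.
fn tree_hash(mut level: Vec<Vec<u8>>) -> Vec<u8> {
    assert!(!level.is_empty());
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                if pair.len() == 2 {
                    let mut hasher = Sha256::new();
                    hasher.input(&pair[0]);
                    hasher.input(&pair[1]);
                    hasher.result().to_vec()
                } else {
                    // An odd hash at the end of a level is promoted unchanged.
                    pair[0].clone()
                }
            })
            .collect();
    }
    level.remove(0)
}

fn main() {
    // Placeholder leaves: in practice these are the hex-decoded
    // x-amz-sha256-tree-hash values from the eight ranged responses.
    let chunk_hashes: Vec<Vec<u8>> = (0u8..8).map(|i| Sha256::digest(&[i]).to_vec()).collect();
    let root = tree_hash(chunk_hashes);
    let hex: String = root.iter().map(|b| format!("{:02x}", b)).collect();
    println!("tree hash: {}", hex);
}
```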

#[serde(rename = "range")] #[serde(skip_serializing_if = "Option::is_none")] pub range: Option, @@ -371,7 +370,7 @@ pub struct GetJobOutputInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq)] pub struct GetJobOutputOutput { ///

Indicates the range units accepted. For more information, see RFC2616.

@@ -382,7 +381,7 @@ pub struct GetJobOutputOutput { pub body: Option, ///

The checksum of the data in the response. This header is returned only when retrieving the output for an archive retrieval job. Furthermore, this header appears only under the following conditions:

  • You get the entire range of the archive.

  • You request a range of the archive to return that starts and ends on a multiple of 1 MB. For example, if you have a 3.1 MB archive and you specify a range to return that starts at 1 MB and ends at 2 MB, then the x-amz-sha256-tree-hash is returned as a response header.

  • You request a range of the archive to return that starts on a multiple of 1 MB and goes to the end of the archive. For example, if you have a 3.1 MB archive and you specify a range that starts at 2 MB and ends at 3.1 MB (the end of the archive), then the x-amz-sha256-tree-hash is returned as a response header.

pub checksum: Option, - ///

The range of bytes returned by Amazon Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

+ ///

The range of bytes returned by Amazon S3 Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon S3 Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

pub content_range: Option, ///

The Content-Type depends on whether the job output is an archive or a vault inventory. For archive data, the Content-Type is application/octet-stream. For vault inventory, if you requested CSV format when you initiated the job, the Content-Type is text/csv. Otherwise, by default, vault inventory is returned as JSON, and the Content-Type is application/json.

pub content_type: Option, @@ -393,7 +392,7 @@ pub struct GetJobOutputOutput { ///

Input for GetVaultAccessPolicy.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetVaultAccessPolicyInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -403,7 +402,7 @@ pub struct GetVaultAccessPolicyInput { ///

Output for GetVaultAccessPolicy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVaultAccessPolicyOutput { ///

Contains the returned vault access policy as a JSON string.

#[serde(rename = "policy")] @@ -414,7 +413,7 @@ pub struct GetVaultAccessPolicyOutput { ///

The input values for GetVaultLock.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetVaultLockInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -422,9 +421,9 @@ pub struct GetVaultLockInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVaultLockOutput { ///

The UTC date and time at which the vault lock was put into the InProgress state.

#[serde(rename = "CreationDate")] @@ -447,7 +446,7 @@ pub struct GetVaultLockOutput { ///

Provides options for retrieving the notification configuration set on an Amazon Glacier vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetVaultNotificationsInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The name of the vault.

@@ -455,9 +454,9 @@ pub struct GetVaultNotificationsInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVaultNotificationsOutput { ///

Returns the notification configuration set on the vault.

#[serde(rename = "vaultNotificationConfig")] @@ -465,9 +464,9 @@ pub struct GetVaultNotificationsOutput { pub vault_notification_config: Option, } -///

Contains the description of an Amazon Glacier job.

+///

Contains the description of an Amazon S3 Glacier job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GlacierJobDescription { ///

The job type. This value is either ArchiveRetrieval, InventoryRetrieval, or Select.

#[serde(rename = "Action")] @@ -509,7 +508,7 @@ pub struct GlacierJobDescription { #[serde(rename = "JobDescription")] #[serde(skip_serializing_if = "Option::is_none")] pub job_description: Option, - ///

An opaque string that identifies an Amazon Glacier job.

+ ///

An opaque string that identifies an Amazon S3 Glacier job.

#[serde(rename = "JobId")] #[serde(skip_serializing_if = "Option::is_none")] pub job_id: Option, @@ -592,10 +591,10 @@ pub struct Grantee { pub uri: Option, } -///

Provides options for initiating an Amazon Glacier job.

+///

Provides options for initiating an Amazon S3 Glacier job.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct InitiateJobInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

Provides options for specifying job information.

@@ -607,9 +606,9 @@ pub struct InitiateJobInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateJobOutput { ///

The ID of the job.

#[serde(rename = "jobId")] @@ -625,10 +624,10 @@ pub struct InitiateJobOutput { pub location: Option, } -///

Provides options for initiating a multipart upload to an Amazon Glacier vault.

+///

Provides options for initiating a multipart upload to an Amazon S3 Glacier vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct InitiateMultipartUploadInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The archive description that you are uploading in parts.

The part size must be a megabyte (1024 KB) multiplied by a power of 2, for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 MB).

@@ -644,11 +643,11 @@ pub struct InitiateMultipartUploadInput { pub vault_name: String, } -///

The Amazon Glacier response to your request.

+///

The Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateMultipartUploadOutput { - ///

The relative URI path of the multipart upload ID Amazon Glacier created.

+ ///

The relative URI path of the multipart upload ID Amazon S3 Glacier created.

#[serde(rename = "location")] #[serde(skip_serializing_if = "Option::is_none")] pub location: Option, @@ -673,9 +672,9 @@ pub struct InitiateVaultLockInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateVaultLockOutput { ///

The lock ID, which is used to complete the vault locking process.

#[serde(rename = "lockId")] @@ -694,7 +693,7 @@ pub struct InputSerialization { ///

Describes the options for a range inventory retrieval job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryRetrievalJobDescription { ///

The end of the date range in UTC for vault inventory retrieval that includes archives created before this date. This value should be a string in the ISO 8601 date format, for example 2013-03-20T17:03:43Z.

#[serde(rename = "EndDate")] @@ -708,7 +707,7 @@ pub struct InventoryRetrievalJobDescription { #[serde(rename = "Limit")] #[serde(skip_serializing_if = "Option::is_none")] pub limit: Option, - ///

An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null. For more information, see Range Inventory Retrieval.

+ ///

An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null. For more information, see Range Inventory Retrieval.

#[serde(rename = "Marker")] #[serde(skip_serializing_if = "Option::is_none")] pub marker: Option, @@ -766,7 +765,7 @@ pub struct JobParameters { #[serde(rename = "RetrievalByteRange")] #[serde(skip_serializing_if = "Option::is_none")] pub retrieval_byte_range: Option, - ///

The Amazon SNS topic ARN to which Amazon Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

+ ///

The Amazon SNS topic ARN to which Amazon S3 Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

#[serde(rename = "SNSTopic")] #[serde(skip_serializing_if = "Option::is_none")] pub sns_topic: Option, @@ -784,10 +783,10 @@ pub struct JobParameters { pub type_: Option, } -///

Provides options for retrieving a job list for an Amazon Glacier vault.

+///

Provides options for retrieving a job list for an Amazon S3 Glacier vault.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListJobsInput { - ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

+ ///

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

#[serde(rename = "accountId")] pub account_id: String, ///

The state of the jobs to return. You can specify true or false.

@@ -811,9 +810,9 @@ pub struct ListJobsInput { pub vault_name: String, } -///

Contains the Amazon Glacier response to your request.

+///

Contains the Amazon S3 Glacier response to your request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsOutput { ///

     /// A list of job objects. Each job object contains metadata describing the job.
     #[serde(rename = "JobList")]
@@ -828,7 +827,7 @@ pub struct ListJobsOutput {
 /// Provides options for retrieving list of in-progress multipart uploads for an Amazon Glacier vault.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListMultipartUploadsInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 50 uploads.
@@ -844,9 +843,9 @@ pub struct ListMultipartUploadsInput {
     pub vault_name: String,
 }
-/// Contains the Amazon Glacier response to your request.
+/// Contains the Amazon S3 Glacier response to your request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListMultipartUploadsOutput {
     /// An opaque string that represents where to continue pagination of the results. You use the marker in a new List Multipart Uploads request to obtain more uploads in the list. If there are no more uploads, this value is null.
     #[serde(rename = "Marker")]

@@ -861,7 +860,7 @@ pub struct ListMultipartUploadsOutput {
 /// Provides options for retrieving a list of parts of an archive that have been uploaded in a specific multipart upload.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListPartsInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The maximum number of parts to be returned. The default limit is 50. The number of parts returned might be fewer than the specified limit, but the number of returned parts never exceeds the limit.
@@ -880,9 +879,9 @@ pub struct ListPartsInput {
     pub vault_name: String,
 }
-/// Contains the Amazon Glacier response to your request.
+/// Contains the Amazon S3 Glacier response to your request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListPartsOutput {
     /// The description of the archive that was specified in the Initiate Multipart Upload request.
     #[serde(rename = "ArchiveDescription")]
@@ -916,13 +915,13 @@ pub struct ListPartsOutput {
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListProvisionedCapacityInput {
-    /// The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.
+    /// The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListProvisionedCapacityOutput {
     /// The response body contains the following JSON fields.
     #[serde(rename = "ProvisionedCapacityList")]
@@ -933,7 +932,7 @@ pub struct ListProvisionedCapacityOutput {
 /// The input value for ListTagsForVaultInput.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListTagsForVaultInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The name of the vault.
@@ -941,9 +940,9 @@ pub struct ListTagsForVaultInput {
     pub vault_name: String,
 }
-/// Contains the Amazon Glacier response to your request.
+/// Contains the Amazon S3 Glacier response to your request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForVaultOutput {
     /// The tags attached to the vault. Each tag is composed of a key and a value.
     #[serde(rename = "Tags")]
@@ -967,9 +966,9 @@ pub struct ListVaultsInput {
     pub marker: Option<String>,
 }
-/// Contains the Amazon Glacier response to your request.
+/// Contains the Amazon S3 Glacier response to your request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListVaultsOutput {
     /// The vault ARN at which to continue pagination of the results. You use the marker in another List Vaults request to obtain more vaults in the list.
     #[serde(rename = "Marker")]

@@ -1001,13 +1000,13 @@ pub struct OutputSerialization {
 /// A list of the part sizes of the multipart upload.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PartListElement {
     /// The byte range of a part, inclusive of the upper value of the range.
     #[serde(rename = "RangeInBytes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub range_in_bytes: Option<String>,
-    /// The SHA256 tree hash value that Amazon Glacier calculated for the part. This field is never null.
+    /// The SHA256 tree hash value that Amazon S3 Glacier calculated for the part. This field is never null.
     #[serde(rename = "SHA256TreeHash")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub sha256_tree_hash: Option<String>,
@@ -1015,7 +1014,7 @@ pub struct PartListElement {
 /// The definition for a provisioned capacity unit.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ProvisionedCapacityDescription {
     /// The ID that identifies the provisioned capacity unit.
     #[serde(rename = "CapacityId")]
@@ -1033,13 +1032,13 @@ pub struct ProvisionedCapacityDescription {
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct PurchaseProvisionedCapacityInput {
-    /// The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.
+    /// The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PurchaseProvisionedCapacityOutput {
     /// The ID that identifies the provisioned capacity unit.
     #[serde(rename = "capacityId")]
@@ -1054,7 +1053,7 @@ pub struct RemoveTagsFromVaultInput {
     #[serde(rename = "TagKeys")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tag_keys: Option<Vec<String>>,
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The name of the vault.

@@ -1135,7 +1134,7 @@ pub struct SetDataRetrievalPolicyInput {
 /// SetVaultAccessPolicy input.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct SetVaultAccessPolicyInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The vault access policy as a JSON string.
@@ -1150,7 +1149,7 @@ pub struct SetVaultAccessPolicyInput {
 /// Provides options to configure notifications that will be sent when specific events happen to a vault.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct SetVaultNotificationsInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The name of the vault.
@@ -1165,7 +1164,7 @@ pub struct SetVaultNotificationsInput {
 /// Provides options to add an archive to a vault.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UploadArchiveInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The optional description of the archive you are uploading.
@@ -1192,7 +1191,7 @@ pub struct UploadArchiveInput {
 /// A list of in-progress multipart uploads for a vault.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UploadListElement {
     /// The description of the archive that was specified in the Initiate Multipart Upload request.
     #[serde(rename = "ArchiveDescription")]
@@ -1219,7 +1218,7 @@ pub struct UploadListElement {
 /// Provides options to upload a part of an archive in a multipart upload operation.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UploadMultipartPartInput {
-    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
+    /// The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.
     #[serde(rename = "accountId")]
     pub account_id: String,
     /// The data to upload.
@@ -1235,7 +1234,7 @@ pub struct UploadMultipartPartInput {
     #[serde(rename = "checksum")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub checksum: Option<String>,
-    /// Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.
+    /// Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon S3 Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.
     #[serde(rename = "range")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub range: Option<String>,

@@ -1247,11 +1246,11 @@ pub struct UploadMultipartPartInput {
     pub vault_name: String,
 }
-/// Contains the Amazon Glacier response to your request.
+/// Contains the Amazon S3 Glacier response to your request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UploadMultipartPartOutput {
-    /// The SHA256 tree hash that Amazon Glacier computed for the uploaded part.
+    /// The SHA256 tree hash that Amazon S3 Glacier computed for the uploaded part.
     #[serde(rename = "checksum")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub checksum: Option<String>,
@@ -1278,7 +1277,7 @@ pub struct VaultLockPolicy {
 /// Represents a vault's notification configuration.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct VaultNotificationConfig {
-    /// A list of one or more events for which Amazon Glacier will send a notification to the specified Amazon SNS topic.
+    /// A list of one or more events for which Amazon S3 Glacier will send a notification to the specified Amazon SNS topic.
     #[serde(rename = "Events")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub events: Option<Vec<String>>,

@@ -3016,7 +3015,7 @@ pub enum UploadArchiveError {
     InvalidParameterValue(String),
     /// Returned if a required header or parameter is missing from the request.
     MissingParameterValue(String),
-    /// Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.
+    /// Returned if, when uploading an archive, Amazon S3 Glacier times out while receiving the upload.
     RequestTimeout(String),
     /// Returned if the specified resource (such as a vault, upload ID, or job ID) doesn't exist.
     ResourceNotFound(String),
@@ -3073,7 +3072,7 @@ pub enum UploadMultipartPartError {
     InvalidParameterValue(String),
     /// Returned if a required header or parameter is missing from the request.
     MissingParameterValue(String),
-    /// Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.
+    /// Returned if, when uploading an archive, Amazon S3 Glacier times out while receiving the upload.
     RequestTimeout(String),
     /// Returned if the specified resource (such as a vault, upload ID, or job ID) doesn't exist.
     ResourceNotFound(String),
@@ -3133,128 +3132,128 @@ impl Error for UploadMultipartPartError {
 }
 /// Trait representing the capabilities of the Amazon Glacier API. Amazon Glacier clients implement this trait.
 pub trait Glacier {

-    /// This operation aborts a multipart upload identified by the upload ID.
-    /// After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.
-    /// This operation is idempotent.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Working with Archives in Amazon Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.
+    /// This operation aborts a multipart upload identified by the upload ID.
+    /// After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.
+    /// This operation is idempotent.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Working with Archives in Amazon S3 Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.
     fn abort_multipart_upload(
         &self,
         input: AbortMultipartUploadInput,
     ) -> RusotoFuture<(), AbortMultipartUploadError>;
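Implementors of this trait include the generated GlacierClient, so the methods are called as shown in this sketch (assuming the blocking .sync() adapter, a previously obtained upload ID, and a placeholder vault name):

use rusoto_core::Region;
use rusoto_glacier::{AbortMultipartUploadInput, Glacier, GlacierClient};

fn abort(upload_id: String) {
    let client = GlacierClient::new(Region::UsEast1);
    client
        .abort_multipart_upload(AbortMultipartUploadInput {
            account_id: "-".to_owned(), // "-" = the account that signs the request
            vault_name: "example-vault".to_owned(),
            upload_id,
        })
        .sync()
        .expect("AbortMultipartUpload failed");
}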

-    /// This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.
-    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
-    /// This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.
+    /// This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.
+    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
+    /// This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.
     fn abort_vault_lock(&self, input: AbortVaultLockInput) -> RusotoFuture<(), AbortVaultLockError>;
-    /// This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon Glacier Resources.
+    /// This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon S3 Glacier Resources.
     fn add_tags_to_vault(
         &self,
         input: AddTagsToVaultInput,
     ) -> RusotoFuture<(), AddTagsToVaultError>;
-    /// You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
-    /// In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.
-    /// Additionally, Amazon Glacier also checks for any missing content ranges when assembling the archive, if missing content ranges are found, Amazon Glacier returns an error and the operation fails.
-    /// Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.
+    /// You call this operation to inform Amazon S3 Glacier (Glacier) that all the archive parts have been uploaded and that Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
+    /// In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.
+    /// Additionally, Glacier also checks for any missing content ranges when assembling the archive, if missing content ranges are found, Glacier returns an error and the operation fails.
+    /// Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.
     fn complete_multipart_upload(
         &self,
        input: CompleteMultipartUploadInput,
     ) -> RusotoFuture<ArchiveCreationOutput, CompleteMultipartUploadError>;
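The SHA256 tree hash required here is computed by hashing 1 MiB chunks and then folding pairs of digests upward until a single root remains. A minimal sketch, assuming the sha2 crate (0.9-style update/finalize API); this helper is not part of this crate:

use sha2::{Digest, Sha256};

fn tree_hash(data: &[u8]) -> Vec<u8> {
    // Leaf level: SHA-256 of each 1 MiB chunk.
    let mut level: Vec<Vec<u8>> = data
        .chunks(1024 * 1024)
        .map(|chunk| Sha256::digest(chunk).to_vec())
        .collect();
    if level.is_empty() {
        level.push(Sha256::digest(b"").to_vec()); // empty-payload edge case
    }
    // Fold pairs of child digests until one root digest remains;
    // an odd trailing node is promoted to the next level unchanged.
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                if pair.len() == 2 {
                    let mut h = Sha256::new();
                    h.update(&pair[0]);
                    h.update(&pair[1]);
                    h.finalize().to_vec()
                } else {
                    pair[0].clone()
                }
            })
            .collect();
    }
    level.remove(0)
}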

-    /// This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, Amazon Glacier Vault Lock.
-    /// This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.
-    /// If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.
+    /// This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, Amazon Glacier Vault Lock.
+    /// This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.
+    /// If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.
     fn complete_vault_lock(
         &self,
         input: CompleteVaultLockInput,
     ) -> RusotoFuture<(), CompleteVaultLockError>;
-    /// This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier.
-    /// You must use the following guidelines when naming a vault.
-    ///   • Names can be between 1 and 255 characters long.
-    ///   • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).
-    /// This operation is idempotent.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.
+    /// This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon S3 Glacier.
+    /// You must use the following guidelines when naming a vault.
+    ///   • Names can be between 1 and 255 characters long.
+    ///   • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).
+    /// This operation is idempotent.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.
     fn create_vault(
         &self,
         input: CreateVaultInput,
     ) -> RusotoFuture<CreateVaultOutput, CreateVaultError>;
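A tiny client-side check mirroring those naming guidelines (illustrative only; the service remains the authority):

fn is_valid_vault_name(name: &str) -> bool {
    // 1-255 characters; a-z, A-Z, 0-9, '_', '-', and '.' are allowed.
    (1..=255).contains(&name.len())
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-' || c == '.')
}
// is_valid_vault_name("backups-2019.photos") == true
// is_valid_vault_name("no spaces allowed")   == false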

-    /// This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:
-    ///   • If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archival retrieval operation might fail.
-    ///   • If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output.
-    /// This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.
+    /// This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:
+    ///   • If the archive retrieval job is actively preparing the data for download when Amazon S3 Glacier receives the delete archive request, the archival retrieval operation might fail.
+    ///   • If the archive retrieval job has successfully prepared the archive for download when Amazon S3 Glacier receives the delete archive request, you will be able to download the output.
+    /// This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.
     fn delete_archive(&self, input: DeleteArchiveInput) -> RusotoFuture<(), DeleteArchiveError>;
-    /// This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).
-    /// This operation is idempotent.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon Glacier Developer Guide.
+    /// This operation deletes a vault. Amazon S3 Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon S3 Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).
+    /// This operation is idempotent.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon S3 Glacier Developer Guide.
     fn delete_vault(&self, input: DeleteVaultInput) -> RusotoFuture<(), DeleteVaultError>;
-    /// This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.
-    /// This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
+    /// This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.
+    /// This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
     fn delete_vault_access_policy(
         &self,
         input: DeleteVaultAccessPolicyInput,
     ) -> RusotoFuture<(), DeleteVaultAccessPolicyError>;
-    /// This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Delete Vault Notification Configuration in the Amazon Glacier Developer Guide.
+    /// This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Delete Vault Notification Configuration in the Amazon S3 Glacier Developer Guide.
     fn delete_vault_notifications(
         &self,
         input: DeleteVaultNotificationsInput,
     ) -> RusotoFuture<(), DeleteVaultNotificationsError>;

-    /// This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob.
-    /// This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job.
-    /// A job ID will not expire for at least 24 hours after Amazon Glacier completes the job.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.
+    /// This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon S3 Glacier (Glacier) completes the job. For more information about initiating a job, see InitiateJob.
+    /// This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Glacier can notify the topic after it completes the job.
+    /// A job ID will not expire for at least 24 hours after Glacier completes the job.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.
     fn describe_job(
         &self,
         input: DescribeJobInput,
     ) -> RusotoFuture<GlacierJobDescription, DescribeJobError>;
-    /// This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon Glacier.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and Describe Vault in the Amazon Glacier Developer Guide.
+    /// This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon S3 Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon S3 Glacier.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and Describe Vault in the Amazon Glacier Developer Guide.
     fn describe_vault(
         &self,
         input: DescribeVaultInput,
     ) -> RusotoFuture<DescribeVaultOutput, DescribeVaultError>;
-    /// This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.
+    /// This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.
     fn get_data_retrieval_policy(
         &self,
         input: GetDataRetrievalPolicyInput,
     ) -> RusotoFuture<GetDataRetrievalPolicyOutput, GetDataRetrievalPolicyError>;
-    /// This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.
-    /// You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.
-    /// A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. That a byte range. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.
-    /// For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon Glacier The expected size is also returned in the headers from the Get Job Output response.
-    /// In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.
-    /// A job ID does not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output
+    /// This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.
+    /// You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon S3 Glacier (Glacier) returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.
+    /// A job ID will not expire for at least 24 hours after Glacier completes the job. That a byte range. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.
+    /// For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon S3 Glacier The expected size is also returned in the headers from the Get Job Output response.
+    /// In the case of an archive retrieval job, depending on the byte range you specify, Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.
+    /// A job ID does not expire for at least 24 hours after Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output
     fn get_job_output(
         &self,
         input: GetJobOutputInput,
     ) -> RusotoFuture<GetJobOutputOutput, GetJobOutputError>;
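A sketch of the ranged-download-and-verify pattern those paragraphs describe, assuming the blocking .sync() adapter and that this version of the crate returns the body as an in-memory buffer; the vault name and job ID are placeholders:

use rusoto_glacier::{GetJobOutputInput, Glacier, GlacierClient};

fn fetch_first_mib(client: &GlacierClient, job_id: String) {
    let out = client
        .get_job_output(GetJobOutputInput {
            account_id: "-".to_owned(),
            vault_name: "example-vault".to_owned(),
            job_id,
            range: Some("bytes=0-1048575".to_owned()),
        })
        .sync()
        .expect("GetJobOutput failed");
    // bytes=0-1048575 should yield exactly 1,048,576 bytes.
    let body = out.body.expect("response had no body");
    assert_eq!(body.len(), 1_048_576);
}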

-    /// This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
+    /// This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
     fn get_vault_access_policy(
         &self,
         input: GetVaultAccessPolicyInput,
     ) -> RusotoFuture<GetVaultAccessPolicyOutput, GetVaultAccessPolicyError>;
-    /// This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:
-    ///   • The vault lock policy set on the vault.
-    ///   • The state of the vault lock, which is either InProgess or Locked.
-    ///   • When the lock ID expires. The lock ID is used to complete the vault locking process.
-    ///   • When the vault lock was initiated and put into the InProgress state.
-    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, Amazon Glacier Vault Lock.
-    /// If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, Amazon Glacier Access Control with Vault Lock Policies.
+    /// This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:
+    ///   • The vault lock policy set on the vault.
+    ///   • The state of the vault lock, which is either InProgess or Locked.
+    ///   • When the lock ID expires. The lock ID is used to complete the vault locking process.
+    ///   • When the vault lock was initiated and put into the InProgress state.
+    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, Amazon Glacier Vault Lock.
+    /// If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, Amazon Glacier Access Control with Vault Lock Policies.
     fn get_vault_lock(
         &self,
         input: GetVaultLockInput,
     ) -> RusotoFuture<GetVaultLockOutput, GetVaultLockError>;
-    /// This operation retrieves the notification-configuration subresource of the specified vault.
-    /// For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon Glacier.
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.
+    /// This operation retrieves the notification-configuration subresource of the specified vault.
+    /// For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon S3 Glacier.
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.
     fn get_vault_notifications(
         &self,
         input: GetVaultNotificationsInput,
     ) -> RusotoFuture<GetVaultNotificationsOutput, GetVaultNotificationsError>;

This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API Initiate a Job.

+ ///

This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API Initiate a Job.

fn initiate_job( &self, input: InitiateJobInput, ) -> RusotoFuture; - ///

-    /// This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).
-    ///
-    /// When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.
-    ///
-    /// Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.
-    ///
-    /// You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.
-    ///
-    /// After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload, or if there is no activity for a period of 24 hours.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.
+    /// This operation initiates a multipart upload. Amazon S3 Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).
+    ///
+    /// When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.
+    ///
+    /// Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.
+    ///
+    /// You don't need to know the size of the archive when you start a multipart upload because Amazon S3 Glacier does not require you to specify the overall archive size.
+    ///
+    /// After you complete the multipart upload, Amazon S3 Glacier (Glacier) removes the multipart upload resource referenced by the ID. Glacier also removes the multipart upload resource if you cancel the multipart upload, or if there is no activity for a period of 24 hours.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.
    fn initiate_multipart_upload(
        &self,
        input: InitiateMultipartUploadInput,
    ) -> RusotoFuture<InitiateMultipartUploadOutput, InitiateMultipartUploadError>;
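To make the sizing rules concrete, here is a small standalone helper (not part of this diff) that checks the power-of-two rule and derives the byte ranges it implies; for the 16.2 MB example above with 4 MB parts it prints four full ranges and one short tail:

```rust
const MIB: u64 = 1 << 20;

/// Glacier's rule: part size is 1 MiB multiplied by a power of two, 1 MiB..=4 GiB.
fn is_valid_part_size(part_size: u64) -> bool {
    part_size.is_power_of_two() && (MIB..=4 * 1024 * MIB).contains(&part_size)
}

/// Splits `total` bytes into inclusive (start, end) ranges: every part except
/// the last is exactly `part_size` bytes; the last may be smaller.
fn part_ranges(total: u64, part_size: u64) -> Vec<(u64, u64)> {
    assert!(is_valid_part_size(part_size));
    (0..total)
        .step_by(part_size as usize)
        .map(|start| (start, (start + part_size).min(total) - 1))
        .collect()
}

fn main() {
    // A 16.2 MB archive with 4 MiB parts: four 4 MiB parts plus a ~0.2 MB tail.
    for (start, end) in part_ranges(16_986_931, 4 * MIB) {
        println!("Content-Range: bytes {}-{}/*", start, end);
    }
}
```

The same arithmetic yields the range header each UploadMultipartPart request needs, which is why misaligned ranges are rejected (see Upload Part below).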

-    /// This operation initiates the vault locking process by doing the following:
-    ///
-    ///   • Installing a vault lock policy on the specified vault.
-    ///
-    ///   • Setting the lock state of vault lock to InProgress.
-    ///
-    ///   • Returning a lock ID, which is used to complete the vault locking process.
-    ///
-    /// You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
-    ///
-    /// You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24 hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.
-    ///
-    /// After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.
-    ///
-    /// You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.
-    ///
-    /// If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state you must call AbortVaultLock before you can initiate a new vault lock policy.
+    /// This operation initiates the vault locking process by doing the following:
+    ///
+    ///   • Installing a vault lock policy on the specified vault.
+    ///
+    ///   • Setting the lock state of vault lock to InProgress.
+    ///
+    ///   • Returning a lock ID, which is used to complete the vault locking process.
+    ///
+    /// You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
+    ///
+    /// You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24 hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.
+    ///
+    /// After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.
+    ///
+    /// You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.
+    ///
+    /// If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state you must call AbortVaultLock before you can initiate a new vault lock policy.
    fn initiate_vault_lock(
        &self,
        input: InitiateVaultLockInput,
    ) -> RusotoFuture<InitiateVaultLockOutput, InitiateVaultLockError>;
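The three-step flow reads more clearly in code. A hedged sketch (placeholder vault name and policy document; the `lock_id` field name follows rusoto's generated output types):

```rust
use rusoto_core::Region;
use rusoto_glacier::{
    CompleteVaultLockInput, Glacier, GlacierClient, InitiateVaultLockInput, VaultLockPolicy,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlacierClient::new(Region::UsEast1);
    // Step 1: install the policy; the lock enters InProgress and returns a lock ID.
    let lock = client
        .initiate_vault_lock(InitiateVaultLockInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(),
            policy: Some(VaultLockPolicy {
                policy: Some(r#"{"Version":"2012-10-17","Statement":[]}"#.to_owned()),
            }),
        })
        .sync()?;
    // Step 2: within 24 hours, either complete (irreversible) or AbortVaultLock.
    client
        .complete_vault_lock(CompleteVaultLockInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(),
            lock_id: lock.lock_id.expect("InitiateVaultLock returns a lock ID"),
        })
        .sync()?;
    Ok(())
}
```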

-    /// This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.
-    ///
-    /// Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.
-    ///
-    /// The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.
-    ///
-    /// You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.
-    ///
-    /// Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).
-    ///
-    /// For more information about using this operation, see the documentation for the underlying REST API List Jobs.
+    /// This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.
+    ///
+    /// Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.
+    ///
+    /// The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.
+    ///
+    /// You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.
+    ///
+    /// Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).
+    ///
+    /// For more information about using this operation, see the documentation for the underlying REST API List Jobs.
    fn list_jobs(&self, input: ListJobsInput) -> RusotoFuture<ListJobsOutput, ListJobsError>;
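A typical Marker-driven pagination loop, sketched against the generated input/output types (the vault name is a placeholder; `job_list` and `marker` follow rusoto's naming of the REST fields):

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, ListJobsInput};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlacierClient::new(Region::UsEast1);
    let mut marker: Option<String> = None;
    loop {
        let page = client
            .list_jobs(ListJobsInput {
                account_id: "-".to_owned(),
                vault_name: "my-vault".to_owned(),
                marker: marker.take(),
                ..Default::default() // statuscode/completed filters, limit
            })
            .sync()?;
        for job in page.job_list.unwrap_or_default() {
            println!("{:?}: {:?}", job.job_id, job.status_code);
        }
        match page.marker {
            Some(next) => marker = Some(next), // more pages remain
            None => break,                     // a null Marker ends the listing
        }
    }
    Ok(())
}
```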

-    /// This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.
-    ///
-    /// The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.
-    ///
-    /// Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.
+    /// This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.
+    ///
+    /// The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.
+    ///
+    /// Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.
    fn list_multipart_uploads(
        &self,
        input: ListMultipartUploadsInput,
    ) -> RusotoFuture<ListMultipartUploadsOutput, ListMultipartUploadsError>;

-    /// This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.
-    ///
-    /// The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.
+    /// This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.
+    ///
+    /// The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Parts in the Amazon Glacier Developer Guide.
    fn list_parts(&self, input: ListPartsInput) -> RusotoFuture<ListPartsOutput, ListPartsError>;

    /// This operation lists the provisioned capacity units for the specified AWS account.
@@ -3263,13 +3262,13 @@ pub trait Glacier {
        input: ListProvisionedCapacityInput,
    ) -> RusotoFuture<ListProvisionedCapacityOutput, ListProvisionedCapacityError>;

-    /// This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon Glacier Resources.
+    /// This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon S3 Glacier Resources.
    fn list_tags_for_vault(
        &self,
        input: ListTagsForVaultInput,
    ) -> RusotoFuture<ListTagsForVaultOutput, ListTagsForVaultError>;

-    /// This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.
-    ///
-    /// By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.
+    /// This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.
+    ///
+    /// By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and List Vaults in the Amazon Glacier Developer Guide.
    fn list_vaults(
        &self,
        input: ListVaultsInput,
@@ -3281,37 +3280,37 @@ pub trait Glacier {
        input: PurchaseProvisionedCapacityInput,
    ) -> RusotoFuture<PurchaseProvisionedCapacityOutput, PurchaseProvisionedCapacityError>;

-    /// This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.
+    /// This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon S3 Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.
    fn remove_tags_from_vault(
        &self,
        input: RemoveTagsFromVaultInput,
    ) -> RusotoFuture<(), RemoveTagsFromVaultError>;

-    /// This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.
-    ///
-    /// The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.
+    /// This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.
+    ///
+    /// The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.
    fn set_data_retrieval_policy(
        &self,
        input: SetDataRetrievalPolicyInput,
    ) -> RusotoFuture<(), SetDataRetrievalPolicyError>;
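For example, a policy capping retrievals at roughly 1 GiB per hour might be set like this (a sketch; the "BytesPerHour" strategy string and the rule field names mirror the REST API and should be verified against the generated docs):

```rust
use rusoto_core::Region;
use rusoto_glacier::{
    DataRetrievalPolicy, DataRetrievalRule, Glacier, GlacierClient, SetDataRetrievalPolicyInput,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlacierClient::new(Region::UsEast1);
    client
        .set_data_retrieval_policy(SetDataRetrievalPolicyInput {
            account_id: "-".to_owned(),
            policy: Some(DataRetrievalPolicy {
                rules: Some(vec![DataRetrievalRule {
                    // Other strategies include "FreeTier" and "None".
                    strategy: Some("BytesPerHour".to_owned()),
                    bytes_per_hour: Some(1 << 30), // ~1 GiB/hour cap
                }]),
            }),
        })
        .sync()?;
    Ok(())
}
```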

-    /// This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
+    /// This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
    fn set_vault_access_policy(
        &self,
        input: SetVaultAccessPolicyInput,
    ) -> RusotoFuture<(), SetVaultAccessPolicyError>;

-    /// This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.
-    ///
-    /// To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.
-    ///
-    /// Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:
-    ///
-    ///   • ArchiveRetrievalCompleted: This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.
-    ///
-    ///   • InventoryRetrievalCompleted: This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.
+    /// This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.
+    ///
+    /// To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon S3 Glacier to send notifications to the topic.
+    ///
+    /// Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:
+    ///
+    ///   • ArchiveRetrievalCompleted: This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.
+    ///
+    ///   • InventoryRetrievalCompleted: This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.
    fn set_vault_notifications(
        &self,
        input: SetVaultNotificationsInput,
    ) -> RusotoFuture<(), SetVaultNotificationsError>;
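A sketch of wiring both events to an SNS topic (the topic ARN and vault name are placeholders; `sns_topic` and `events` follow the generated config struct):

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, SetVaultNotificationsInput, VaultNotificationConfig};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlacierClient::new(Region::UsEast1);
    client
        .set_vault_notifications(SetVaultNotificationsInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(),
            vault_notification_config: Some(VaultNotificationConfig {
                sns_topic: Some("arn:aws:sns:us-east-1:123456789012:glacier-jobs".to_owned()),
                events: Some(vec![
                    "ArchiveRetrievalCompleted".to_owned(),
                    "InventoryRetrievalCompleted".to_owned(),
                ]),
            }),
        })
        .sync()?;
    Ok(())
}
```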

-    /// This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id header of the response.
-    ///
-    /// You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
-    ///
-    /// You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.
-    ///
-    /// You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.
-    ///
-    /// Archives are immutable. After you upload an archive, you cannot edit the archive or its description.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.
+    /// This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon S3 Glacier returns the archive ID in the x-amz-archive-id header of the response.
+    ///
+    /// You must use the archive ID to access your data in Amazon S3 Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
+    ///
+    /// You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.
+    ///
+    /// You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.
+    ///
+    /// Archives are immutable. After you upload an archive, you cannot edit the archive or its description.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.
    fn upload_archive(
        &self,
        input: UploadArchiveInput,
    ) -> RusotoFuture<ArchiveCreationOutput, UploadArchiveError>;
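The SHA256 tree hash referenced throughout these docs is straightforward to compute: hash every 1 MiB chunk, then repeatedly hash concatenated pairs of digests until one remains, and send the result as lowercase hex. A standalone sketch using the sha2 crate (0.10-style API), independent of this diff:

```rust
use sha2::{Digest, Sha256};

const MIB: usize = 1 << 20;

/// Level 0 hashes each 1 MiB chunk; each higher level hashes concatenated
/// pairs, promoting an odd trailing digest unchanged, until one remains.
fn tree_hash(data: &[u8]) -> [u8; 32] {
    let mut level: Vec<[u8; 32]> = if data.is_empty() {
        vec![Sha256::digest(data).into()]
    } else {
        data.chunks(MIB).map(|c| Sha256::digest(c).into()).collect()
    };
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                if pair.len() == 2 {
                    let mut h = Sha256::new();
                    h.update(&pair[0]);
                    h.update(&pair[1]);
                    h.finalize().into()
                } else {
                    pair[0]
                }
            })
            .collect();
    }
    level[0]
}

/// Glacier expects the digest as a lowercase hex string (x-amz-sha256-tree-hash).
fn to_hex(digest: &[u8]) -> String {
    digest.iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
    let archive = vec![0u8; 3 * MIB + 12_345];
    println!("x-amz-sha256-tree-hash: {}", to_hex(&tree_hash(&archive)));
}
```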

-    /// This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.
-    ///
-    /// Amazon Glacier rejects your upload part request if any of the following conditions is true:
-    ///
-    ///   • SHA256 tree hash does not match: To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.
-    ///
-    ///   • Part size does not match: The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.
-    ///
-    ///     If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.
-    ///
-    ///   • Range does not align: The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.
-    ///
-    /// This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.
+    /// This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.
+    ///
+    /// Amazon Glacier rejects your upload part request if any of the following conditions is true:
+    ///
+    ///   • SHA256 tree hash does not match: To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon S3 Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.
+    ///
+    ///   • Part size does not match: The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.
+    ///
+    ///     If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.
+    ///
+    ///   • Range does not align: The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.
+    ///
+    /// This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.
    fn upload_multipart_part(
        &self,
        input: UploadMultipartPartInput,
@@ -3329,10 +3328,7 @@ impl GlacierClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> GlacierClient {
-        GlacierClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with(
@@ -3346,15 +3342,19 @@ impl GlacierClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        GlacierClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> GlacierClient {
+        GlacierClient { client, region }
    }
}

impl Glacier for GlacierClient {
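The constructor refactor above is this file's only behavioral code change: `new` and `new_with` now funnel through the new `new_with_client`, which also lets callers inject a shared `rusoto_core::Client`. A minimal usage sketch (region chosen arbitrarily):

```rust
use rusoto_core::{Client, Region};
use rusoto_glacier::GlacierClient;

fn main() {
    // One shared core client (default credentials and TLS dispatcher) can now
    // be handed to several service clients instead of each building its own.
    let core = Client::shared();
    let _glacier = GlacierClient::new_with_client(core, Region::UsEast1);
}
```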

-    /// This operation aborts a multipart upload identified by the upload ID.
-    ///
-    /// After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.
-    ///
-    /// This operation is idempotent.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Working with Archives in Amazon Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.
+    /// This operation aborts a multipart upload identified by the upload ID.
+    ///
+    /// After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.
+    ///
+    /// This operation is idempotent.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Working with Archives in Amazon S3 Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.
    fn abort_multipart_upload(
        &self,
        input: AbortMultipartUploadInput,
@@ -3387,7 +3387,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.
-    ///
-    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
-    ///
-    /// This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.
+    /// This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.
+    ///
+    /// A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.
+    ///
+    /// This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.
    fn abort_vault_lock(
        &self,
        input: AbortVaultLockInput,
@@ -3420,7 +3420,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon Glacier Resources.
+    /// This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon S3 Glacier Resources.
    fn add_tags_to_vault(
        &self,
        input: AddTagsToVaultInput,
@@ -3460,7 +3460,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
-    ///
-    /// In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.
-    ///
-    /// Additionally, Amazon Glacier also checks for any missing content ranges when assembling the archive; if missing content ranges are found, Amazon Glacier returns an error and the operation fails.
-    ///
-    /// Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.
+    /// You call this operation to inform Amazon S3 Glacier (Glacier) that all the archive parts have been uploaded and that Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
+    ///
+    /// In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.
+    ///
+    /// Additionally, Glacier also checks for any missing content ranges when assembling the archive; if missing content ranges are found, Glacier returns an error and the operation fails.
+    ///
+    /// Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.
    fn complete_multipart_upload(
        &self,
        input: CompleteMultipartUploadInput,
@@ -3512,7 +3512,7 @@ impl Glacier for GlacierClient {
        })
    }
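A hedged sketch of the finishing call; the upload ID and tree hash are placeholders, and note that the archive size and checksum travel as strings (they are HTTP headers on the wire):

```rust
use rusoto_core::Region;
use rusoto_glacier::{CompleteMultipartUploadInput, Glacier, GlacierClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlacierClient::new(Region::UsEast1);
    let archive = client
        .complete_multipart_upload(CompleteMultipartUploadInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(),
            upload_id: "EXAMPLE_UPLOAD_ID".to_owned(),
            archive_size: Some("16986931".to_owned()), // total bytes uploaded
            checksum: Some("EXAMPLE_TREE_HASH_HEX".to_owned()), // whole-archive tree hash
        })
        .sync()?; // safe to retry on a 500: the same archive ID comes back
    println!("archive id: {:?}", archive.archive_id);
    Ok(())
}
```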

-    /// This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.
-    ///
-    /// This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.
-    ///
-    /// If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.
+    /// This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.
+    ///
+    /// This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.
+    ///
+    /// If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.
    fn complete_vault_lock(
        &self,
        input: CompleteVaultLockInput,
@@ -3546,7 +3546,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier.
-    ///
-    /// You must use the following guidelines when naming a vault.
-    ///
-    ///   • Names can be between 1 and 255 characters long.
-    ///
-    ///   • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).
-    ///
-    /// This operation is idempotent.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.
+    /// This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon S3 Glacier.
+    ///
+    /// You must use the following guidelines when naming a vault.
+    ///
+    ///   • Names can be between 1 and 255 characters long.
+    ///
+    ///   • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).
+    ///
+    /// This operation is idempotent.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.
    fn create_vault(
        &self,
        input: CreateVaultInput,
@@ -3584,7 +3584,7 @@ impl Glacier for GlacierClient {
        })
    }
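The naming rules condense into a one-screen validator (standalone, not part of the diff):

```rust
/// Vault names: 1-255 characters from a-z, A-Z, 0-9, '_', '-', and '.'.
fn is_valid_vault_name(name: &str) -> bool {
    !name.is_empty()
        && name.len() <= 255
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-' || c == '.')
}

fn main() {
    assert!(is_valid_vault_name("photos-2019.backup"));
    assert!(!is_valid_vault_name("no spaces allowed"));
}
```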

-    /// This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:
-    ///
-    ///   • If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archival retrieval operation might fail.
-    ///
-    ///   • If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output.
-    ///
-    /// This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.
+    /// This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:
+    ///
+    ///   • If the archive retrieval job is actively preparing the data for download when Amazon S3 Glacier receives the delete archive request, the archival retrieval operation might fail.
+    ///
+    ///   • If the archive retrieval job has successfully prepared the archive for download when Amazon S3 Glacier receives the delete archive request, you will be able to download the output.
+    ///
+    /// This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.
    fn delete_archive(&self, input: DeleteArchiveInput) -> RusotoFuture<(), DeleteArchiveError> {
        let request_uri = format!(
            "/{account_id}/vaults/{vault_name}/archives/{archive_id}",
@@ -3615,7 +3615,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).
-    ///
-    /// This operation is idempotent.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon Glacier Developer Guide.
+    /// This operation deletes a vault. Amazon S3 Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon S3 Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).
+    ///
+    /// This operation is idempotent.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon S3 Glacier Developer Guide.
    fn delete_vault(&self, input: DeleteVaultInput) -> RusotoFuture<(), DeleteVaultError> {
        let request_uri = format!(
            "/{account_id}/vaults/{vault_name}",
@@ -3645,7 +3645,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.
-    ///
-    /// This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
+    /// This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.
+    ///
+    /// This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.
    fn delete_vault_access_policy(
        &self,
        input: DeleteVaultAccessPolicyInput,
@@ -3675,7 +3675,7 @@ impl Glacier for GlacierClient {
        })
    }

-    /// This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.
-    ///
-    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
-    ///
-    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Delete Vault Notification Configuration in the Amazon Glacier Developer Guide.
+    /// This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.
+    ///
+    /// An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).
+    ///
+    /// For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Delete Vault Notification Configuration in the Amazon S3 Glacier Developer Guide.
    fn delete_vault_notifications(
        &self,
        input: DeleteVaultNotificationsInput,
@@ -3705,7 +3705,7 @@ impl Glacier for GlacierClient {
        })
    }

This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob.

This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job.

A job ID will not expire for at least 24 hours after Amazon Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.

+ ///

This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon S3 Glacier (Glacier) completes the job. For more information about initiating a job, see InitiateJob.

This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Glacier can notify the topic after it completes the job.

A job ID will not expire for at least 24 hours after Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.
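To make the doc comment above concrete, here is a minimal sketch of checking a job's status with the generated client. It assumes the 0.41-era `rusoto_glacier` API with the blocking `.sync()` helper, and the vault name and job ID are placeholders; in practice, prefer the SNS notification described above over polling.

```rust
use rusoto_core::Region;
use rusoto_glacier::{DescribeJobInput, Glacier, GlacierClient};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    let input = DescribeJobInput {
        account_id: "-".to_owned(), // "-" means the account of the caller's credentials
        vault_name: "my-vault".to_owned(), // placeholder
        job_id: "EXAMPLE-JOB-ID".to_owned(), // placeholder
    };
    match client.describe_job(input).sync() {
        Ok(job) => println!("completed: {:?}, status: {:?}", job.completed, job.status_code),
        Err(e) => eprintln!("DescribeJob failed: {}", e),
    }
}
```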

fn describe_job( &self, input: DescribeJobInput, @@ -3740,7 +3740,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and Describe Vault in the Amazon Glacier Developer Guide.

+ ///

This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon S3 Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon S3 Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and Describe Vault in the Amazon Glacier Developer Guide.

fn describe_vault( &self, input: DescribeVaultInput, @@ -3774,7 +3774,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

+ ///

This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

fn get_data_retrieval_policy( &self, input: GetDataRetrievalPolicyInput, @@ -3806,7 +3806,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.

A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.

For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon Glacier. The expected size is also returned in the headers from the Get Job Output response.

In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.

A job ID does not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24-hour period after Amazon Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output.

+ ///

This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon S3 Glacier (Glacier) returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.

A job ID will not expire for at least 24 hours after Glacier completes the job. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.

For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon S3 Glacier. The expected size is also returned in the headers from the Get Job Output response.

In the case of an archive retrieval job, depending on the byte range you specify, Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.

A job ID does not expire for at least 24 hours after Glacier completes the job. That is, you can download the job output within the 24-hour period after Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output.
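As a hedged illustration of the ranged download described above, the sketch below requests the first mebibyte of a job's output and inspects the returned checksum and range. The vault name and job ID are placeholders, field names follow the generated `GetJobOutputInput`/`GetJobOutputOutput` types, and the blocking `.sync()` helper is assumed.

```rust
use rusoto_core::Region;
use rusoto_glacier::{GetJobOutputInput, Glacier, GlacierClient};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    let input = GetJobOutputInput {
        account_id: "-".to_owned(),
        vault_name: "my-vault".to_owned(),   // placeholder
        job_id: "EXAMPLE-JOB-ID".to_owned(), // placeholder
        // Ranged GET: fetch bytes 0..=1048575 (exactly 1 MiB, tree-hash aligned).
        range: Some("bytes=0-1048575".to_owned()),
    };
    match client.get_job_output(input).sync() {
        Ok(output) => {
            // Compare `output.checksum` with a tree hash computed client-side,
            // and check the size implied by `output.content_range`.
            println!("status: {:?}", output.status);
            println!("checksum: {:?}", output.checksum);
            println!("content range: {:?}", output.content_range);
        }
        Err(e) => eprintln!("GetJobOutput failed: {}", e),
    }
}
```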

fn get_job_output( &self, input: GetJobOutputInput, @@ -3868,7 +3868,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

+ ///

This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

fn get_vault_access_policy( &self, input: GetVaultAccessPolicyInput, @@ -3901,7 +3901,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:

  • The vault lock policy set on the vault.

  • The state of the vault lock, which is either InProgress or Locked.

  • When the lock ID expires. The lock ID is used to complete the vault locking process.

  • When the vault lock was initiated and put into the InProgress state.

A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

+ ///

This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:

  • The vault lock policy set on the vault.

  • The state of the vault lock, which is either InProgress or Locked.

  • When the lock ID expires. The lock ID is used to complete the vault locking process.

  • When the vault lock was initiated and put into the InProgress state.

A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

fn get_vault_lock( &self, input: GetVaultLockInput, @@ -3935,7 +3935,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation retrieves the notification-configuration subresource of the specified vault.

For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

+ ///

This operation retrieves the notification-configuration subresource of the specified vault.

For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon S3 Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

fn get_vault_notifications( &self, input: GetVaultNotificationsInput, @@ -3968,7 +3968,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API Initiate a Job.

+ ///

This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API Initiate a Job.
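For illustration, a minimal sketch initiating an inventory-retrieval job follows; the `type_` and `format` values come from the REST API's Initiate a Job documentation, the vault name is a placeholder, and the 0.41-era `rusoto_glacier` types with `.sync()` are assumed.

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, InitiateJobInput, JobParameters};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    let input = InitiateJobInput {
        account_id: "-".to_owned(),
        vault_name: "my-vault".to_owned(), // placeholder
        job_parameters: Some(JobParameters {
            type_: Some("inventory-retrieval".to_owned()), // or "archive-retrieval" / "select"
            format: Some("JSON".to_owned()),
            ..Default::default() // archive_id, sns_topic, tier, ... left unset
        }),
    };
    match client.initiate_job(input).sync() {
        Ok(out) => println!("job id: {:?}, location: {:?}", out.job_id, out.location),
        Err(e) => eprintln!("InitiateJob failed: {}", e),
    }
}
```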

fn initiate_job( &self, input: InitiateJobInput, @@ -4017,7 +4017,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.

After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.

+ ///

This operation initiates a multipart upload. Amazon S3 Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

You don't need to know the size of the archive when you start a multipart upload because Amazon S3 Glacier does not require you to specify the overall archive size.

After you complete the multipart upload, Amazon S3 Glacier (Glacier) removes the multipart upload resource referenced by the ID. Glacier also removes the multipart upload resource if you cancel the multipart upload; it may also be removed if there is no activity for a period of 24 hours.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.
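The part-size rule above (1 MB multiplied by a power of two, between 1 MB and 4 GB) is easy to get wrong, so a small self-contained validity check may help. This helper is an editorial sketch in plain Rust, not part of the generated API.

```rust
/// True if `part_size` is 1 MiB multiplied by a power of two,
/// within Glacier's documented bounds of 1 MiB to 4 GiB.
fn is_valid_part_size(part_size: u64) -> bool {
    const MIB: u64 = 1024 * 1024;
    const MAX: u64 = 4096 * MIB; // 4 GiB
    part_size >= MIB
        && part_size <= MAX
        && part_size % MIB == 0
        && (part_size / MIB).is_power_of_two()
}

fn main() {
    assert!(is_valid_part_size(1_048_576)); // 1 MiB
    assert!(is_valid_part_size(8_388_608)); // 8 MiB
    assert!(!is_valid_part_size(3_145_728)); // 3 MiB: a multiple of 1 MiB but not a power of two
    assert!(!is_valid_part_size(1_048_577)); // not a whole number of MiB
}
```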

fn initiate_multipart_upload( &self, input: InitiateMultipartUploadInput, @@ -4067,7 +4067,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation initiates the vault locking process by doing the following:

  • Installing a vault lock policy on the specified vault.

  • Setting the lock state of vault lock to InProgress.

  • Returning a lock ID, which is used to complete the vault locking process.

You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24-hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.

After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.

You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state, you must call AbortVaultLock before you can initiate a new vault lock policy.

+ ///

This operation initiates the vault locking process by doing the following:

  • Installing a vault lock policy on the specified vault.

  • Setting the lock state of vault lock to InProgress.

  • Returning a lock ID, which is used to complete the vault locking process.

You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24-hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.

After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.

You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state, you must call AbortVaultLock before you can initiate a new vault lock policy.
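A sketch of the two-step locking flow above (initiate, then complete within 24 hours) follows. It assumes the generated `VaultLockPolicy` wrapper type and `.sync()`, and uses a placeholder vault name and an empty placeholder policy document.

```rust
use rusoto_core::Region;
use rusoto_glacier::{
    CompleteVaultLockInput, Glacier, GlacierClient, InitiateVaultLockInput, VaultLockPolicy,
};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    // Placeholder policy document; a real one would carry deny statements.
    let policy_json = r#"{"Version":"2012-10-17","Statement":[]}"#.to_owned();
    let lock = client
        .initiate_vault_lock(InitiateVaultLockInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(), // placeholder
            policy: Some(VaultLockPolicy { policy: Some(policy_json) }),
        })
        .sync()
        .expect("InitiateVaultLock failed");
    // The vault lock is now InProgress; test the policy, then complete it
    // with the returned lock ID (or call abort_vault_lock to start over).
    if let Some(lock_id) = lock.lock_id {
        client
            .complete_vault_lock(CompleteVaultLockInput {
                account_id: "-".to_owned(),
                vault_name: "my-vault".to_owned(),
                lock_id,
            })
            .sync()
            .expect("CompleteVaultLock failed");
    }
}
```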

fn initiate_vault_lock( &self, input: InitiateVaultLockInput, @@ -4108,7 +4108,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.

Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.

You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.

Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).

For more information about using this operation, see the documentation for the underlying REST API List Jobs.

+ ///

This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.

Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.

You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.

Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).

For more information about using this operation, see the documentation for the underlying REST API List Jobs.
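Because the Marker handling above is the part callers most often get wrong, here is a hedged pagination sketch; it relies on the generated input struct's `Default` impl, a placeholder vault name, and the blocking `.sync()` helper.

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, ListJobsInput};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    let mut marker: Option<String> = None;
    loop {
        let input = ListJobsInput {
            account_id: "-".to_owned(),
            vault_name: "my-vault".to_owned(), // placeholder
            marker: marker.clone(),
            ..Default::default() // limit and status filters left unset
        };
        let page = client.list_jobs(input).sync().expect("ListJobs failed");
        for job in page.job_list.unwrap_or_default() {
            println!("{:?} {:?}", job.job_id, job.status_code);
        }
        marker = page.marker;
        if marker.is_none() {
            break; // a null marker means there are no more pages
        }
    }
}
```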

fn list_jobs(&self, input: ListJobsInput) -> RusotoFuture { let request_uri = format!( "/{account_id}/vaults/{vault_name}/jobs", @@ -4154,7 +4154,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

+ ///

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

fn list_multipart_uploads( &self, input: ListMultipartUploadsInput, @@ -4196,7 +4196,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

+ ///

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Parts in the Amazon Glacier Developer Guide.

fn list_parts(&self, input: ListPartsInput) -> RusotoFuture { let request_uri = format!( "/{account_id}/vaults/{vault_name}/multipart-uploads/{upload_id}", @@ -4267,7 +4267,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon Glacier Resources.

+ ///

This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon S3 Glacier Resources.

fn list_tags_for_vault( &self, input: ListTagsForVaultInput, @@ -4301,7 +4301,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

+ ///

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and List Vaults in the Amazon Glacier Developer Guide.

fn list_vaults( &self, input: ListVaultsInput, @@ -4374,7 +4374,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

+ ///

This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon S3 Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

fn remove_tags_from_vault( &self, input: RemoveTagsFromVaultInput, @@ -4413,7 +4413,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.

The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

+ ///

This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.

The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

fn set_data_retrieval_policy( &self, input: SetDataRetrievalPolicyInput, @@ -4447,7 +4447,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

+ ///

This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

fn set_vault_access_policy( &self, input: SetVaultAccessPolicyInput, @@ -4482,7 +4482,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.

Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:

  • ArchiveRetrievalCompleted This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.

  • InventoryRetrievalCompleted This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.

+ ///

This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon S3 Glacier to send notifications to the topic.

Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:

  • ArchiveRetrievalCompleted This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.

  • InventoryRetrievalCompleted This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.
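To show the shape of the notification document described above as it appears through the generated types, here is a hedged sketch. The SNS topic ARN and vault name are placeholders, the topic must already permit the vault to publish, and `.sync()` is assumed.

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, SetVaultNotificationsInput, VaultNotificationConfig};

fn main() {
    let client = GlacierClient::new(Region::UsEast1);
    let input = SetVaultNotificationsInput {
        account_id: "-".to_owned(),
        vault_name: "my-vault".to_owned(), // placeholder
        vault_notification_config: Some(VaultNotificationConfig {
            // Placeholder ARN; the topic policy must allow the vault to publish.
            sns_topic: Some("arn:aws:sns:us-east-1:123456789012:glacier-jobs".to_owned()),
            events: Some(vec![
                "ArchiveRetrievalCompleted".to_owned(),
                "InventoryRetrievalCompleted".to_owned(),
            ]),
        }),
    };
    client
        .set_vault_notifications(input)
        .sync()
        .expect("SetVaultNotifications failed");
}
```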

fn set_vault_notifications( &self, input: SetVaultNotificationsInput, @@ -4517,7 +4517,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id header of the response.

You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.

+ ///

This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon S3 Glacier returns the archive ID in the x-amz-archive-id header of the response.

You must use the archive ID to access your data in Amazon S3 Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.
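Since the doc comment above requires a SHA256 tree hash but does not show how to compute one, a standalone sketch follows. It implements the published algorithm (hash each 1 MiB chunk, then fold adjacent pairs of digests) using the third-party sha2 crate (0.9+ API); it is illustrative, not part of rusoto.

```rust
use sha2::{Digest, Sha256};

/// SHA-256 tree hash as Glacier defines it: hash each 1 MiB chunk,
/// then repeatedly hash the concatenation of adjacent pairs (an odd
/// trailing digest is carried up unchanged) until one root remains.
fn tree_hash(data: &[u8]) -> Vec<u8> {
    const CHUNK: usize = 1024 * 1024;
    let mut level: Vec<Vec<u8>> = if data.is_empty() {
        vec![Sha256::digest(data).to_vec()] // an empty payload hashes the empty string
    } else {
        data.chunks(CHUNK).map(|c| Sha256::digest(c).to_vec()).collect()
    };
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                if let [left, right] = pair {
                    let mut h = Sha256::new();
                    h.update(left);
                    h.update(right);
                    h.finalize().to_vec()
                } else {
                    pair[0].clone() // odd leftover is promoted unchanged
                }
            })
            .collect();
    }
    level.remove(0)
}

fn main() {
    let data = vec![0u8; 3 * 1024 * 1024]; // three 1 MiB chunks
    let hex: String = tree_hash(&data).iter().map(|b| format!("{:02x}", b)).collect();
    println!("x-amz-sha256-tree-hash: {}", hex);
}
```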

fn upload_archive( &self, input: UploadArchiveInput, @@ -4581,7 +4581,7 @@ impl Glacier for GlacierClient { }) } - ///

This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

Amazon Glacier rejects your upload part request if any of the following conditions is true:

  • SHA256 tree hash does not match: To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.

  • Part size does not match: The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.

    If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.

  • Range does not align: The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.

This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.

+ ///

This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

Amazon Glacier rejects your upload part request if any of the following conditions is true:

  • SHA256 tree hash does not match: To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon S3 Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.

  • Part size does not match: The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.

    If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.

  • Range does not align: The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.

This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.
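The range-alignment rule above reduces to simple arithmetic; the sketch below derives the Content-Range value for each part (the final part may be shorter). This is an editorial helper, not generated code, and the "bytes first-last/*" format is the one the Upload Part REST documentation describes.

```rust
/// Content-Range for 0-based part `index` of an archive of `total` bytes
/// uploaded with a fixed `part_size`, e.g. "bytes 0-4194303/*".
/// Returns None when the part would start past the end of the archive.
fn part_range(index: u64, part_size: u64, total: u64) -> Option<String> {
    let first = index.checked_mul(part_size)?;
    if first >= total {
        return None;
    }
    let last = (first + part_size).min(total) - 1;
    Some(format!("bytes {}-{}/*", first, last))
}

fn main() {
    let part_size = 4 * 1024 * 1024; // 4 MiB, fixed at InitiateMultipartUpload time
    let total = 10 * 1024 * 1024;    // 10 MiB archive
    for i in 0.. {
        match part_range(i, part_size, total) {
            Some(range) => println!("part {}: {}", i, range), // aligned ranges only
            None => break,
        }
    }
}
```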

fn upload_multipart_part( &self, input: UploadMultipartPartInput, diff --git a/rusoto/services/glacier/src/lib.rs b/rusoto/services/glacier/src/lib.rs index 74d0a8c158e..69eeec94d59 100644 --- a/rusoto/services/glacier/src/lib.rs +++ b/rusoto/services/glacier/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Amazon Glacier is a storage solution for "cold data."

Amazon Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Amazon Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier.

If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

  • What is Amazon Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service.

  • Getting Started with Amazon Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives.

+//!

Amazon S3 Glacier (Glacier) is a storage solution for "cold data."

Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Glacier, customers can store their data cost effectively for months, years, or decades. Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Glacier.

If you are a first-time user of Glacier, we recommend that you begin by reading the following sections in the Amazon S3 Glacier Developer Guide:

  • What is Amazon S3 Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service.

  • Getting Started with Amazon S3 Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives.
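To complement the Getting Started pointer above, a minimal, hedged usage sketch follows; it assumes the 0.41-era `rusoto_glacier` crate with the blocking `.sync()` helper, and lets the default credential chain resolve credentials.

```rust
use rusoto_core::Region;
use rusoto_glacier::{Glacier, GlacierClient, ListVaultsInput};

fn main() {
    // Credentials come from rusoto_core's default provider chain.
    let client = GlacierClient::new(Region::UsEast1);
    let input = ListVaultsInput {
        account_id: "-".to_owned(), // "-" targets the caller's own account
        ..Default::default()        // limit and marker left unset
    };
    match client.list_vaults(input).sync() {
        Ok(out) => {
            for vault in out.vault_list.unwrap_or_default() {
                println!("{:?}: {:?} archives", vault.vault_name, vault.number_of_archives);
            }
        }
        Err(e) => eprintln!("ListVaults failed: {}", e),
    }
}
```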

//! //! If you're using the service, you're probably looking for [GlacierClient](struct.GlacierClient.html) and [Glacier](trait.Glacier.html). diff --git a/rusoto/services/glue/Cargo.toml b/rusoto/services/glue/Cargo.toml index 33eb2389c1a..6c034158b37 100644 --- a/rusoto/services/glue/Cargo.toml +++ b/rusoto/services/glue/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_glue" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/glue/README.md b/rusoto/services/glue/README.md index d0fb09d613a..ce6b69c3de1 100644 --- a/rusoto/services/glue/README.md +++ b/rusoto/services/glue/README.md @@ -23,9 +23,16 @@ To use `rusoto_glue` in your application, add it as a dependency in your `Cargo. ```toml [dependencies] -rusoto_glue = "0.40.0" +rusoto_glue = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/glue/src/custom/mod.rs b/rusoto/services/glue/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/glue/src/custom/mod.rs +++ b/rusoto/services/glue/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/glue/src/generated.rs b/rusoto/services/glue/src/generated.rs index 99e9b791293..ad2d2223658 100644 --- a/rusoto/services/glue/src/generated.rs +++ b/rusoto/services/glue/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -31,6 +30,10 @@ pub struct Action { #[serde(rename = "Arguments")] #[serde(skip_serializing_if = "Option::is_none")] pub arguments: Option<::std::collections::HashMap>, + ///

The name of the crawler to be used with this action.

+ #[serde(rename = "CrawlerName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crawler_name: Option, ///

The name of a job to be executed.

#[serde(rename = "JobName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -51,7 +54,7 @@ pub struct Action { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchCreatePartitionRequest { - ///

The ID of the catalog in which the partion is to be created. Currently, this should be the AWS account ID.

+ ///

The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -67,9 +70,9 @@ pub struct BatchCreatePartitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchCreatePartitionResponse { - ///

Errors encountered when trying to create the requested partitions.

+ ///

The errors encountered when trying to create the requested partitions.

#[serde(rename = "Errors")] #[serde(skip_serializing_if = "Option::is_none")] pub errors: Option>, @@ -87,7 +90,7 @@ pub struct BatchDeleteConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteConnectionResponse { ///

A map of the names of connections that were not successfully deleted to error details.

#[serde(rename = "Errors")] @@ -101,7 +104,7 @@ pub struct BatchDeleteConnectionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDeletePartitionRequest { - ///

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -111,15 +114,15 @@ pub struct BatchDeletePartitionRequest { ///

A list of PartitionInput structures that define the partitions to be deleted.

#[serde(rename = "PartitionsToDelete")] pub partitions_to_delete: Vec, - ///

The name of the table where the partitions to be deleted is located.

+ ///

The name of the table that contains the partitions to be deleted.

#[serde(rename = "TableName")] pub table_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeletePartitionResponse { - ///

Errors encountered when trying to delete the requested partitions.

+ ///

The errors encountered when trying to delete the requested partitions.

#[serde(rename = "Errors")] #[serde(skip_serializing_if = "Option::is_none")] pub errors: Option>, @@ -127,11 +130,11 @@ pub struct BatchDeletePartitionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDeleteTableRequest { - ///

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, - ///

The name of the catalog database where the tables to delete reside. For Hive compatibility, this name is entirely lowercase.

+ ///

The name of the catalog database in which the tables to delete reside. For Hive compatibility, this name is entirely lowercase.

#[serde(rename = "DatabaseName")] pub database_name: String, ///

A list of the table to delete.

@@ -140,7 +143,7 @@ pub struct BatchDeleteTableRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteTableResponse { ///

A list of errors encountered in attempting to delete the specified tables.

#[serde(rename = "Errors")] @@ -150,7 +153,7 @@ pub struct BatchDeleteTableResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchDeleteTableVersionRequest { - ///

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -166,7 +169,7 @@ pub struct BatchDeleteTableVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDeleteTableVersionResponse { ///

A list of errors encountered while trying to delete the specified table versions.

#[serde(rename = "Errors")] @@ -182,7 +185,7 @@ pub struct BatchGetCrawlersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetCrawlersResponse { ///

A list of crawler definitions.

#[serde(rename = "Crawlers")] @@ -196,19 +199,19 @@ pub struct BatchGetCrawlersResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BatchGetDevEndpointsRequest { - ///

The list of DevEndpoint names, which may be the names returned from the ListDevEndpoint operation.

+ ///

The list of DevEndpoint names, which might be the names returned from the ListDevEndpoint operation.

#[serde(rename = "DevEndpointNames")] pub dev_endpoint_names: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetDevEndpointsResponse { - ///

A list of DevEndpoint definitions.

+ ///

A list of DevEndpoint definitions.

#[serde(rename = "DevEndpoints")] #[serde(skip_serializing_if = "Option::is_none")] pub dev_endpoints: Option>, - ///

A list of DevEndpoints not found.

+ ///

A list of DevEndpoints not found.

#[serde(rename = "DevEndpointsNotFound")] #[serde(skip_serializing_if = "Option::is_none")] pub dev_endpoints_not_found: Option>, @@ -222,7 +225,7 @@ pub struct BatchGetJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetJobsResponse { ///

A list of job definitions.

#[serde(rename = "Jobs")] @@ -252,13 +255,13 @@ pub struct BatchGetPartitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetPartitionResponse { ///

A list of the requested partitions.

#[serde(rename = "Partitions")] #[serde(skip_serializing_if = "Option::is_none")] pub partitions: Option>, - ///

A list of the partition values in the request for which partions were not returned.

+ ///

A list of the partition values in the request for which partitions were not returned.

#[serde(rename = "UnprocessedKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub unprocessed_keys: Option>, @@ -272,7 +275,7 @@ pub struct BatchGetTriggersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetTriggersResponse { ///

A list of trigger definitions.

#[serde(rename = "Triggers")] @@ -284,9 +287,33 @@ pub struct BatchGetTriggersResponse { pub triggers_not_found: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct BatchGetWorkflowsRequest { + ///

Specifies whether to include a graph when returning the workflow resource metadata.

+ #[serde(rename = "IncludeGraph")] + #[serde(skip_serializing_if = "Option::is_none")] + pub include_graph: Option, + ///

A list of workflow names, which may be the names returned from the ListWorkflows operation.

+ #[serde(rename = "Names")] + pub names: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct BatchGetWorkflowsResponse { + ///

A list of names of workflows not found.

+ #[serde(rename = "MissingWorkflows")] + #[serde(skip_serializing_if = "Option::is_none")] + pub missing_workflows: Option>, + ///

A list of workflow resource metadata.

+ #[serde(rename = "Workflows")] + #[serde(skip_serializing_if = "Option::is_none")] + pub workflows: Option>, +} + ///

Records an error that occurred when attempting to stop a specified job run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchStopJobRunError { ///

Specifies details about the error that was encountered.

#[serde(rename = "ErrorDetail")] @@ -313,7 +340,7 @@ pub struct BatchStopJobRunRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchStopJobRunResponse { ///

A list of the errors that were encountered in trying to stop JobRuns, including the JobRunId for which each error was encountered and details about the error.

#[serde(rename = "Errors")] @@ -327,7 +354,7 @@ pub struct BatchStopJobRunResponse { ///

Records a successful request to stop a specified JobRun.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchStopJobRunSuccessfulSubmission { ///

The name of the job definition used in the job run that was stopped.

#[serde(rename = "JobName")] @@ -339,6 +366,33 @@ pub struct BatchStopJobRunSuccessfulSubmission { pub job_run_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CancelMLTaskRunRequest { + ///

A unique identifier for the task run.

+ #[serde(rename = "TaskRunId")] + pub task_run_id: String, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CancelMLTaskRunResponse { + ///

The status for this run.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The unique identifier for the task run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, +} + ///

Specifies a table definition in the AWS Glue Data Catalog.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CatalogEntry { @@ -352,9 +406,9 @@ pub struct CatalogEntry { ///

A structure containing migration status information.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CatalogImportStatus { - ///

True if the migration has completed, or False otherwise.

+ ///

True if the migration has completed, or False otherwise.

#[serde(rename = "ImportCompleted")] #[serde(skip_serializing_if = "Option::is_none")] pub import_completed: Option, @@ -381,7 +435,7 @@ pub struct CatalogTarget { ///

Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.

You can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Classifier { ///

A classifier for comma-separated values (CSV).

#[serde(rename = "CsvClassifier")] @@ -401,14 +455,14 @@ pub struct Classifier { pub xml_classifier: Option, } -///

Specifies how CloudWatch data should be encrypted.

+///

Specifies how Amazon CloudWatch data should be encrypted.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CloudWatchEncryption { ///

The encryption mode to use for CloudWatch data.

#[serde(rename = "CloudWatchEncryptionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub cloud_watch_encryption_mode: Option, - ///

The AWS ARN of the KMS key to be used to encrypt the data.

+ ///

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

#[serde(rename = "KmsKeyArn")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_arn: Option, @@ -465,14 +519,18 @@ pub struct CodeGenNodeArg { ///

A column in a Table.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Column { - ///

Free-form text comment.

+ ///

A free-form text comment.

#[serde(rename = "Comment")] #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, ///

The name of the Column.

#[serde(rename = "Name")] pub name: String, - ///

The datatype of data in the Column.

+ ///

These key-value pairs define properties associated with the column.

+ #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option<::std::collections::HashMap>, + ///

The data type of the Column.

#[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -481,6 +539,14 @@ pub struct Column { ///

Defines a condition under which a trigger fires.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Condition { + ///

The state of the crawler to which this condition applies.

+ #[serde(rename = "CrawlState")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crawl_state: Option, + ///

The name of the crawler to which this condition applies.

+ #[serde(rename = "CrawlerName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crawler_name: Option, ///

The name of the job whose JobRuns this condition applies to, and on which this trigger waits.

#[serde(rename = "JobName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -495,11 +561,33 @@ pub struct Condition { pub state: Option, } +///

The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.

For more information, see Confusion matrix in Wikipedia.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ConfusionMatrix { + ///

The number of matches in the data that the transform didn't find, in the confusion matrix for your transform.

+ #[serde(rename = "NumFalseNegatives")] + #[serde(skip_serializing_if = "Option::is_none")] + pub num_false_negatives: Option, + ///

The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.

+ #[serde(rename = "NumFalsePositives")] + #[serde(skip_serializing_if = "Option::is_none")] + pub num_false_positives: Option, + ///

The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.

+ #[serde(rename = "NumTrueNegatives")] + #[serde(skip_serializing_if = "Option::is_none")] + pub num_true_negatives: Option, + ///

The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.

+ #[serde(rename = "NumTruePositives")] + #[serde(skip_serializing_if = "Option::is_none")] + pub num_true_positives: Option, +} + ///

Defines a connection to a data source.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Connection { - ///

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USERNAME - The name under which to log in to the database. The value string for USERNAME is "USERNAME".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTEDPASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBCDRIVERJARURI - The Amazon S3 path of the JAR file that contains the JDBC driver to use.

  • JDBCDRIVERCLASSNAME - The class name of the JDBC driver to use.

  • JDBCENGINE - The name of the JDBC engine to use.

  • JDBCENGINEVERSION - The version of the JDBC engine to use.

  • CONFIGFILES - (Reserved for future use).

  • INSTANCEID - The instance ID to use.

  • JDBCCONNECTIONURL - The URL for the JDBC connection.

  • JDBCENFORCESSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching will be enforced for the JDBC connection on the client. The default is false.

+ ///

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USERNAME - The name under which to log in to the database. The value string for USERNAME is "USERNAME".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTEDPASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBCDRIVERJARURI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBCDRIVERCLASSNAME - The class name of the JDBC driver to use.

  • JDBCENGINE - The name of the JDBC engine to use.

  • JDBCENGINEVERSION - The version of the JDBC engine to use.

  • CONFIGFILES - (Reserved for future use.)

  • INSTANCEID - The instance ID to use.

  • JDBCCONNECTIONURL - The URL for the JDBC connection.

  • JDBCENFORCESSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

#[serde(rename = "ConnectionProperties")] #[serde(skip_serializing_if = "Option::is_none")] pub connection_properties: Option<::std::collections::HashMap>, @@ -563,7 +651,7 @@ pub struct ConnectionInput { pub physical_connection_requirements: Option, } -///

The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

When a CreationConnection request arrives containing a password, the Data Catalog first encrypts the password using your AWS KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled.

This encryption requires that you set AWS KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only admin users to have decrypt permission on the password key.

+///

The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

When a CreationConnection request arrives containing a password, the Data Catalog first encrypts the password using your AWS KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled.

This encryption requires that you set AWS KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only administrators to have decrypt permission on the password key.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ConnectionPasswordEncryption { ///

An AWS KMS key that is used to encrypt the connection password.

If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.

You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.

@@ -584,9 +672,39 @@ pub struct ConnectionsList { pub connections: Option>, } +///

The details of a crawl in the workflow.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Crawl { + ///

The date and time on which the crawl completed.

+ #[serde(rename = "CompletedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub completed_on: Option, + ///

The error message associated with the crawl.

+ #[serde(rename = "ErrorMessage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_message: Option, + ///

The log group associated with the crawl.

+ #[serde(rename = "LogGroup")] + #[serde(skip_serializing_if = "Option::is_none")] + pub log_group: Option, + ///

The log stream associated with the crawl.

+ #[serde(rename = "LogStream")] + #[serde(skip_serializing_if = "Option::is_none")] + pub log_stream: Option, + ///

The date and time on which the crawl started.

+ #[serde(rename = "StartedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_on: Option, + ///

The state of the crawler.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + ///

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Crawler { ///

A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.

#[serde(rename = "Classifiers")] @@ -660,7 +778,7 @@ pub struct Crawler { ///

Metrics for a specified crawler.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CrawlerMetrics { ///

The name of the crawler.

#[serde(rename = "CrawlerName")] @@ -696,6 +814,16 @@ pub struct CrawlerMetrics { pub time_left_seconds: Option, } +///

The details of a Crawler node present in the workflow.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CrawlerNodeDetails { + ///

A list of crawls represented by the crawl node.

+ #[serde(rename = "Crawls")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crawls: Option>, +} + ///

Specifies data stores to crawl.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CrawlerTargets { @@ -738,7 +866,7 @@ pub struct CreateClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClassifierResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -753,7 +881,7 @@ pub struct CreateConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConnectionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -806,7 +934,7 @@ pub struct CreateCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCrawlerResponse {} ///

Specifies a custom CSV classifier for CreateClassifier to create.

@@ -843,97 +971,105 @@ pub struct CreateCsvClassifierRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateDatabaseRequest { - ///

The ID of the Data Catalog in which to create the database. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, - ///

A DatabaseInput object defining the metadata database to create in the catalog.

+ ///

The metadata for the database.

#[serde(rename = "DatabaseInput")] pub database_input: DatabaseInput, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDatabaseResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateDevEndpointRequest { - ///

A map of arguments used to configure the DevEndpoint.

+ ///

A map of arguments used to configure the DevEndpoint.

#[serde(rename = "Arguments")] #[serde(skip_serializing_if = "Option::is_none")] pub arguments: Option<::std::collections::HashMap>, - ///

The name to be assigned to the new DevEndpoint.

+ ///

The name to be assigned to the new DevEndpoint.

#[serde(rename = "EndpointName")] pub endpoint_name: String, - ///

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

+ ///

The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint.

#[serde(rename = "ExtraJarsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_jars_s3_path: Option, - ///

Path(s) to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma.

Please note that only pure Python libraries can currently be used on a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.

+ ///

The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma.

You can only use pure Python libraries with a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.

#[serde(rename = "ExtraPythonLibsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_python_libs_s3_path: Option, - ///

The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint.

+ ///

The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint.

#[serde(rename = "NumberOfNodes")] #[serde(skip_serializing_if = "Option::is_none")] pub number_of_nodes: Option, - ///

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility, as the recommended attribute to use is public keys.

+ ///

The number of workers of a defined workerType that are allocated to the development endpoint.

The maximum number of workers you can define are 299 for G.1X, and 149 for G.2X.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.

#[serde(rename = "PublicKey")] #[serde(skip_serializing_if = "Option::is_none")] pub public_key: Option, - ///

A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.

If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.

+ ///

A list of public keys to be used by the development endpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.

If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.

#[serde(rename = "PublicKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub public_keys: Option>, - ///

The IAM role for the DevEndpoint.

+ ///

The IAM role for the DevEndpoint.

#[serde(rename = "RoleArn")] pub role_arn: String, - ///

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

+ ///

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

#[serde(rename = "SecurityConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub security_configuration: Option, - ///

Security group IDs for the security groups to be used by the new DevEndpoint.

+ ///

Security group IDs for the security groups to be used by the new DevEndpoint.

#[serde(rename = "SecurityGroupIds")] #[serde(skip_serializing_if = "Option::is_none")] pub security_group_ids: Option>, - ///

The subnet ID for the new DevEndpoint to use.

+ ///

The subnet ID for the new DevEndpoint to use.

#[serde(rename = "SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_id: Option, - ///

The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

+ ///

The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, + ///

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDevEndpointResponse { - ///

The map of arguments used to configure this DevEndpoint.

+ ///

The map of arguments used to configure this DevEndpoint.

#[serde(rename = "Arguments")] #[serde(skip_serializing_if = "Option::is_none")] pub arguments: Option<::std::collections::HashMap>, - ///

The AWS availability zone where this DevEndpoint is located.

+ ///

The AWS Availability Zone where this DevEndpoint is located.

#[serde(rename = "AvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub availability_zone: Option, - ///

The point in time at which this DevEndpoint was created.

+ ///

The point in time at which this DevEndpoint was created.

#[serde(rename = "CreatedTimestamp")] #[serde(skip_serializing_if = "Option::is_none")] pub created_timestamp: Option, - ///

The name assigned to the new DevEndpoint.

+ ///

The name assigned to the new DevEndpoint.

#[serde(rename = "EndpointName")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_name: Option, - ///

Path to one or more Java Jars in an S3 bucket that will be loaded in your DevEndpoint.

+ ///

Path to one or more Java .jar files in an S3 bucket that will be loaded in your DevEndpoint.

#[serde(rename = "ExtraJarsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_jars_s3_path: Option, - ///

Path(s) to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint.

+ ///

The paths to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint.

#[serde(rename = "ExtraPythonLibsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_python_libs_s3_path: Option, - ///

The reason for a current failure in this DevEndpoint.

+ ///

The reason for a current failure in this DevEndpoint.

#[serde(rename = "FailureReason")] #[serde(skip_serializing_if = "Option::is_none")] pub failure_reason: Option, @@ -941,31 +1077,39 @@ pub struct CreateDevEndpointResponse { #[serde(rename = "NumberOfNodes")] #[serde(skip_serializing_if = "Option::is_none")] pub number_of_nodes: Option, - ///

The AWS ARN of the role assigned to the new DevEndpoint.

+ ///

The number of workers of a defined workerType that are allocated to the development endpoint.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint.

#[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The name of the SecurityConfiguration structure being used with this DevEndpoint.

+ ///

The name of the SecurityConfiguration structure being used with this DevEndpoint.

#[serde(rename = "SecurityConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub security_configuration: Option, - ///

The security groups assigned to the new DevEndpoint.

+ ///

The security groups assigned to the new DevEndpoint.

#[serde(rename = "SecurityGroupIds")] #[serde(skip_serializing_if = "Option::is_none")] pub security_group_ids: Option>, - ///

The current status of the new DevEndpoint.

+ ///

The current status of the new DevEndpoint.

#[serde(rename = "Status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - ///

The subnet ID assigned to the new DevEndpoint.

+ ///

The subnet ID assigned to the new DevEndpoint.

#[serde(rename = "SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_id: Option, - ///

The ID of the VPC used by this DevEndpoint.

+ ///

The ID of the virtual private cloud (VPC) used by this DevEndpoint.

#[serde(rename = "VpcId")] #[serde(skip_serializing_if = "Option::is_none")] pub vpc_id: Option, - ///

The address of the YARN endpoint used by this DevEndpoint.

+ ///

The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, + ///

The address of the YARN endpoint used by this DevEndpoint.

#[serde(rename = "YarnEndpointAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub yarn_endpoint_address: Option, @@ -1014,6 +1158,10 @@ pub struct CreateJobRequest { #[serde(rename = "ExecutionProperty")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_property: Option, + ///

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

Jobs that are created without specifying a Glue version default to Glue 0.9.

+ #[serde(rename = "GlueVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub glue_version: Option, ///

This field is reserved for future use.

#[serde(rename = "LogUri")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1059,7 +1207,7 @@ pub struct CreateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobResponse { ///

The unique name that was provided for this job definition.

#[serde(rename = "Name")] @@ -1078,9 +1226,58 @@ pub struct CreateJsonClassifierRequest { pub name: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateMLTransformRequest { + ///

A description of the machine learning transform that is being defined. The default is an empty string.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

A list of AWS Glue table definitions used by the transform.

+ #[serde(rename = "InputRecordTables")] + pub input_record_tables: Vec, + ///

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

+ #[serde(rename = "MaxCapacity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_capacity: Option, + ///

The maximum number of times to retry a task for this transform after a task run fails.

+ #[serde(rename = "MaxRetries")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_retries: Option, + ///

The unique name that you give the transform when you create it.

+ #[serde(rename = "Name")] + pub name: String, + ///

The number of workers of a defined workerType that are allocated when this task runs.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

The algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type.

+ #[serde(rename = "Parameters")] + pub parameters: TransformParameters, + ///

The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. Ensure that this role has permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries that are used by the task run for this transform.

+ #[serde(rename = "Role")] + pub role: String, + ///

The timeout of the task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

+ #[serde(rename = "Timeout")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timeout: Option, + ///

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.

  • For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateMLTransformResponse { + ///

A unique identifier that is generated for the transform.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreatePartitionRequest { - ///

The ID of the catalog in which the partion is to be created. Currently, this should be the AWS account ID.

+ ///

The AWS account ID of the catalog in which the partition is to be created.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1096,7 +1293,7 @@ pub struct CreatePartitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePartitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1116,7 +1313,7 @@ pub struct CreateScriptRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateScriptResponse { ///

The Python script generated from the DAG.

#[serde(rename = "PythonScript")] @@ -1139,7 +1336,7 @@ pub struct CreateSecurityConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSecurityConfigurationResponse { ///

The time at which the new security configuration was created.

#[serde(rename = "CreatedTimestamp")] @@ -1166,7 +1363,7 @@ pub struct CreateTableRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTableResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1200,10 +1397,14 @@ pub struct CreateTriggerRequest { ///

The type of the new trigger.

#[serde(rename = "Type")] pub type_: String, + ///

The name of the workflow associated with the trigger.

+ #[serde(rename = "WorkflowName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow_name: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTriggerResponse { ///

The name of the trigger.

#[serde(rename = "Name")] @@ -1213,7 +1414,7 @@ pub struct CreateTriggerResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateUserDefinedFunctionRequest { - ///

The ID of the Data Catalog in which to create the function. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog in which to create the function. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1226,9 +1427,37 @@ pub struct CreateUserDefinedFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserDefinedFunctionResponse {} +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateWorkflowRequest { + ///

A collection of properties to be used as part of each execution of the workflow.

+ #[serde(rename = "DefaultRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub default_run_properties: Option<::std::collections::HashMap>, + ///

A description of the workflow.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

The name to be assigned to the workflow. It should be unique within your account.

+ #[serde(rename = "Name")] + pub name: String, + ///

The tags to be used with this workflow.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateWorkflowResponse { + ///

The name of the workflow which was provided as part of the request.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + ///

Specifies an XML classifier for CreateClassifier to create.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateXMLClassifierRequest { @@ -1246,7 +1475,7 @@ pub struct CreateXMLClassifierRequest { ///

A classifier for custom CSV content.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CsvClassifier { ///

Enables the processing of files that contain only one column.

#[serde(rename = "AllowSingleColumn")] @@ -1302,15 +1531,28 @@ pub struct DataCatalogEncryptionSettings { pub encryption_at_rest: Option, } -///

The Database object represents a logical grouping of tables that may reside in a Hive metastore or an RDBMS.

+///

The AWS Lake Formation principal.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DataLakePrincipal { + ///

An identifier for the AWS Lake Formation principal.

+ #[serde(rename = "DataLakePrincipalIdentifier")] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_lake_principal_identifier: Option, +} + +///

The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Database { + ///

Creates a set of default permissions on the table for principals.

+ #[serde(rename = "CreateTableDefaultPermissions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub create_table_default_permissions: Option>, ///

The time at which the metadata database was created in the catalog.

#[serde(rename = "CreateTime")] #[serde(skip_serializing_if = "Option::is_none")] pub create_time: Option, - ///

Description of the database.

+ ///

A description of the database.

#[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, @@ -1318,7 +1560,7 @@ pub struct Database { #[serde(rename = "LocationUri")] #[serde(skip_serializing_if = "Option::is_none")] pub location_uri: Option, - ///

Name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

+ ///

The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

#[serde(rename = "Name")] pub name: String, ///

These key-value pairs define parameters and properties of the database.

@@ -1330,18 +1572,22 @@ pub struct Database { ///

The structure used to create or update a database.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DatabaseInput { - ///

Description of the database

+ ///

Creates a set of default permissions on the table for principals.

+ #[serde(rename = "CreateTableDefaultPermissions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub create_table_default_permissions: Option>, + ///

A description of the database.

#[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

The location of the database (for example, an HDFS path).

+ ///

The location of the database (for example, an HDFS path).

#[serde(rename = "LocationUri")] #[serde(skip_serializing_if = "Option::is_none")] pub location_uri: Option, - ///

Name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

+ ///

The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

#[serde(rename = "Name")] pub name: String, - ///

Thes key-value pairs define parameters and properties of the database.

+ ///

These key-value pairs define parameters and properties of the database.

These key-value pairs define parameters and properties of the database.

#[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, @@ -1355,7 +1601,7 @@ pub struct DeleteClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClassifierResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1370,7 +1616,7 @@ pub struct DeleteConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteConnectionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1381,33 +1627,33 @@ pub struct DeleteCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCrawlerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteDatabaseRequest { - ///

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, - ///

The name of the Database to delete. For Hive compatibility, this must be all lowercase.

+ ///

The name of the database to delete. For Hive compatibility, this must be all lowercase.

#[serde(rename = "Name")] pub name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDatabaseResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteDevEndpointRequest { - ///

The name of the DevEndpoint.

+ ///

The name of the DevEndpoint.

#[serde(rename = "EndpointName")] pub endpoint_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDevEndpointResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1418,7 +1664,7 @@ pub struct DeleteJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteJobResponse { ///

The name of the job definition that was deleted.

#[serde(rename = "JobName")] @@ -1426,9 +1672,25 @@ pub struct DeleteJobResponse { pub job_name: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteMLTransformRequest { + ///

The unique identifier of the transform to delete.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteMLTransformResponse { + ///

The unique identifier of the transform that was deleted.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeletePartitionRequest { - ///

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1438,13 +1700,13 @@ pub struct DeletePartitionRequest { ///

The values that define the partition.

#[serde(rename = "PartitionValues")] pub partition_values: Vec, - ///

The name of the table where the partition to be deleted is located.

+ ///

The name of the table that contains the partition to be deleted.

#[serde(rename = "TableName")] pub table_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePartitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1456,7 +1718,7 @@ pub struct DeleteResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResourcePolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1467,12 +1729,12 @@ pub struct DeleteSecurityConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSecurityConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteTableRequest { - ///

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1485,12 +1747,12 @@ pub struct DeleteTableRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTableResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteTableVersionRequest { - ///

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1506,7 +1768,7 @@ pub struct DeleteTableVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTableVersionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1517,7 +1779,7 @@ pub struct DeleteTriggerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTriggerResponse { ///

The name of the trigger that was deleted.

#[serde(rename = "Name")] @@ -1540,18 +1802,34 @@ pub struct DeleteUserDefinedFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserDefinedFunctionResponse {} -///

A development endpoint where a developer can remotely debug ETL scripts.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteWorkflowRequest { + ///

Name of the workflow to be deleted.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteWorkflowResponse { + ///

Name of the workflow specified in input.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +///

A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DevEndpoint { - ///

A map of arguments used to configure the DevEndpoint. Note that currently, we only support "--enable-glue-datacatalog": "" as a valid argument.
+ /// A map of arguments used to configure the DevEndpoint. Currently, only "--enable-glue-datacatalog": "" is supported as a valid argument.

#[serde(rename = "Arguments")] #[serde(skip_serializing_if = "Option::is_none")] pub arguments: Option<::std::collections::HashMap>, - ///

The AWS availability zone where this DevEndpoint is located.
+ /// The AWS Availability Zone where this DevEndpoint is located.

#[serde(rename = "AvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub availability_zone: Option, @@ -1559,23 +1837,23 @@ pub struct DevEndpoint { #[serde(rename = "CreatedTimestamp")] #[serde(skip_serializing_if = "Option::is_none")] pub created_timestamp: Option, - ///

The name of the DevEndpoint.

+ ///

The name of the DevEndpoint.

#[serde(rename = "EndpointName")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint_name: Option, - ///

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint. Please note that only pure Java/Scala libraries can currently be used on a DevEndpoint.
+ /// The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint. You can only use pure Java/Scala libraries with a DevEndpoint.

#[serde(rename = "ExtraJarsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_jars_s3_path: Option, - ///

Path(s) to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma. Please note that only pure Python libraries can currently be used on a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.
+ /// The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma. You can only use pure Python libraries with a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.

#[serde(rename = "ExtraPythonLibsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_python_libs_s3_path: Option, - ///

The reason for a current failure in this DevEndpoint.

+ ///

The reason for a current failure in this DevEndpoint.

#[serde(rename = "FailureReason")] #[serde(skip_serializing_if = "Option::is_none")] pub failure_reason: Option, - ///

The point in time at which this DevEndpoint was last modified.

+ ///

The point in time at which this DevEndpoint was last modified.

#[serde(rename = "LastModifiedTimestamp")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_timestamp: Option, @@ -1583,51 +1861,59 @@ pub struct DevEndpoint { #[serde(rename = "LastUpdateStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub last_update_status: Option, - ///

The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.

+ ///

The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.

#[serde(rename = "NumberOfNodes")] #[serde(skip_serializing_if = "Option::is_none")] pub number_of_nodes: Option, - ///

A private IP address to access the DevEndpoint within a VPC, if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your virtual private cloud (VPC).
+ /// The number of workers of a defined workerType that are allocated to the development endpoint. The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.

#[serde(rename = "PrivateAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub private_address: Option, - ///

The public IP address used by this DevEndpoint. The PublicAddress field is present only when you create a non-VPC (virtual private cloud) DevEndpoint.
+ /// The public IP address used by this DevEndpoint. The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint.

#[serde(rename = "PublicAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub public_address: Option, - ///

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility, as the recommended attribute to use is public keys.
+ /// The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.

#[serde(rename = "PublicKey")] #[serde(skip_serializing_if = "Option::is_none")] pub public_key: Option, - ///

A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client. If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
+ /// A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client. If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
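The doc above describes a two-step key swap through UpdateDevEndpoint. A hedged sketch of that call, assuming the `update_dev_endpoint` method and the `add_public_keys`/`delete_public_keys` request fields generated for this API (endpoint name and key material are placeholders):

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, UpdateDevEndpointRequest};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    // Remove the single legacy key and install a per-client key list,
    // as the documentation above prescribes.
    let req = UpdateDevEndpointRequest {
        endpoint_name: "my-dev-endpoint".to_string(), // placeholder
        delete_public_keys: Some(vec!["ssh-rsa AAAA...old".to_string()]), // placeholder
        add_public_keys: Some(vec![
            "ssh-rsa AAAA...client-a".to_string(), // placeholder
            "ssh-rsa AAAA...client-b".to_string(), // placeholder
        ]),
        ..Default::default()
    };
    if let Err(e) = client.update_dev_endpoint(req).sync() {
        eprintln!("UpdateDevEndpoint failed: {}", e);
    }
}
```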

#[serde(rename = "PublicKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub public_keys: Option>, - ///

The AWS ARN of the IAM role used in this DevEndpoint.
+ /// The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint.

#[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

+ ///

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

#[serde(rename = "SecurityConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub security_configuration: Option, - ///

A list of security group identifiers used in this DevEndpoint.

+ ///

A list of security group identifiers used in this DevEndpoint.

#[serde(rename = "SecurityGroupIds")] #[serde(skip_serializing_if = "Option::is_none")] pub security_group_ids: Option>, - ///

The current status of this DevEndpoint.

+ ///

The current status of this DevEndpoint.

#[serde(rename = "Status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - ///

The subnet ID for this DevEndpoint.

+ ///

The subnet ID for this DevEndpoint.

#[serde(rename = "SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_id: Option, - ///

The ID of the virtual private cloud (VPC) used by this DevEndpoint.

+ ///

The ID of the virtual private cloud (VPC) used by this DevEndpoint.

#[serde(rename = "VpcId")] #[serde(skip_serializing_if = "Option::is_none")] pub vpc_id: Option, - ///

The YARN endpoint address used by this DevEndpoint.

+ ///

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, + ///

The YARN endpoint address used by this DevEndpoint.

#[serde(rename = "YarnEndpointAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub yarn_endpoint_address: Option, @@ -1637,14 +1923,14 @@ pub struct DevEndpoint { pub zeppelin_remote_spark_interpreter_port: Option, } -///

Custom libraries to be loaded into a DevEndpoint.
+/// Custom libraries to be loaded into a development endpoint.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DevEndpointCustomLibraries { - ///

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint. Please note that only pure Java/Scala libraries can currently be used on a DevEndpoint.
+ /// The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint. You can only use pure Java/Scala libraries with a DevEndpoint.

#[serde(rename = "ExtraJarsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_jars_s3_path: Option, - ///

Path(s) to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma. Please note that only pure Python libraries can currently be used on a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.
+ /// The paths to one or more Python libraries in an Amazon Simple Storage Service (Amazon S3) bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma. You can only use pure Python libraries with a DevEndpoint. Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.

#[serde(rename = "ExtraPythonLibsS3Path")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_python_libs_s3_path: Option, @@ -1659,6 +1945,20 @@ pub struct DynamoDBTarget { pub path: Option, } +///

An edge represents a directed connection between two AWS Glue components that are part of the workflow the edge belongs to.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Edge { + ///

The unique ID of the node within the workflow where the edge ends.

+ #[serde(rename = "DestinationId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub destination_id: Option, + ///

The unique ID of the node within the workflow where the edge starts.

+ #[serde(rename = "SourceId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_id: Option, +} + ///

Specifies the encryption-at-rest configuration for the Data Catalog.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EncryptionAtRest { @@ -1674,15 +1974,15 @@ pub struct EncryptionAtRest { ///

Specifies an encryption configuration.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EncryptionConfiguration { - ///

The encryption configuration for CloudWatch.

+ ///

The encryption configuration for Amazon CloudWatch.

#[serde(rename = "CloudWatchEncryption")] #[serde(skip_serializing_if = "Option::is_none")] pub cloud_watch_encryption: Option, - ///

The encryption configuration for Job Bookmarks.

+ ///

The encryption configuration for job bookmarks.

#[serde(rename = "JobBookmarksEncryption")] #[serde(skip_serializing_if = "Option::is_none")] pub job_bookmarks_encryption: Option, - ///

The encryption configuration for S3 data.

+ ///

The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.

#[serde(rename = "S3Encryption")] #[serde(skip_serializing_if = "Option::is_none")] pub s3_encryption: Option>, @@ -1690,7 +1990,7 @@ pub struct EncryptionConfiguration { ///

Contains details about an error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorDetail { ///

The code associated with this error.

#[serde(rename = "ErrorCode")] @@ -1702,6 +2002,19 @@ pub struct ErrorDetail { pub error_message: Option, } +///

Evaluation metrics provide an estimate of the quality of your machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EvaluationMetrics { + ///

The evaluation metrics for the find matches algorithm.

+ #[serde(rename = "FindMatchesMetrics")] + #[serde(skip_serializing_if = "Option::is_none")] + pub find_matches_metrics: Option, + ///

The type of machine learning transform.

+ #[serde(rename = "TransformType")] + pub transform_type: String, +} + ///

An execution property of a job.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ExecutionProperty { @@ -1711,6 +2024,81 @@ pub struct ExecutionProperty { pub max_concurrent_runs: Option, } +///

Specifies configuration properties for an exporting labels task run.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ExportLabelsTaskRunProperties { + ///

The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.

+ #[serde(rename = "OutputS3Path")] + #[serde(skip_serializing_if = "Option::is_none")] + pub output_s3_path: Option, +} + +///

The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct FindMatchesMetrics { + ///

The area under the precision/recall curve (AUPRC) is a single number that measures the overall quality of the transform, independent of the tradeoff chosen between precision and recall. Higher values indicate a more attractive precision/recall tradeoff.

For more information, see Precision and recall in Wikipedia.

+ #[serde(rename = "AreaUnderPRCurve")] + #[serde(skip_serializing_if = "Option::is_none")] + pub area_under_pr_curve: Option, + ///

The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.

For more information, see Confusion matrix in Wikipedia.

+ #[serde(rename = "ConfusionMatrix")] + #[serde(skip_serializing_if = "Option::is_none")] + pub confusion_matrix: Option, + ///

The maximum F1 metric indicates the transform's accuracy between 0 and 1, where 1 is the best accuracy.

For more information, see F1 score in Wikipedia.

+ #[serde(rename = "F1")] + #[serde(skip_serializing_if = "Option::is_none")] + pub f1: Option, + ///

The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.

For more information, see Precision and recall in Wikipedia.

+ #[serde(rename = "Precision")] + #[serde(skip_serializing_if = "Option::is_none")] + pub precision: Option, + ///

The recall metric indicates how often, for an actual match, your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.

For more information, see Precision and recall in Wikipedia.
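All of these metrics arrive as optional fields nested under EvaluationMetrics on GetMLTransformResponse, so reading them means chaining through Options. A sketch, assuming the generated `get_ml_transform` method (the transform ID is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_glue::{GetMLTransformRequest, Glue, GlueClient};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let req = GetMLTransformRequest {
        transform_id: "tfm-0123456789abcdef".to_string(), // placeholder
    };
    match client.get_ml_transform(req).sync() {
        Ok(resp) => {
            // Every level is optional, so chain through the Options.
            let fm = resp.evaluation_metrics.and_then(|m| m.find_matches_metrics);
            if let Some(fm) = fm {
                println!(
                    "AUPRC={:?} precision={:?} recall={:?} F1={:?}",
                    fm.area_under_pr_curve, fm.precision, fm.recall, fm.f1
                );
            }
        }
        Err(e) => eprintln!("GetMLTransform failed: {}", e),
    }
}
```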

+ #[serde(rename = "Recall")] + #[serde(skip_serializing_if = "Option::is_none")] + pub recall: Option, +} + +///

The parameters to configure the find matches transform.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct FindMatchesParameters { + ///

The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.

Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.

Cost measures how many compute resources, and thus money, are consumed to run the transform.

+ #[serde(rename = "AccuracyCostTradeoff")] + #[serde(skip_serializing_if = "Option::is_none")] + pub accuracy_cost_tradeoff: Option, + ///

The value to switch on or off to force the output to match the provided labels from users. If the value is True, the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False, the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.

Note that setting this value to true may increase the conflation execution time.

+ #[serde(rename = "EnforceProvidedLabels")] + #[serde(skip_serializing_if = "Option::is_none")] + pub enforce_provided_labels: Option, + ///

The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.

The precision metric indicates how often your model is correct when it predicts a match.

The recall metric indicates how often, for an actual match, your model predicts the match.

+ #[serde(rename = "PrecisionRecallTradeoff")] + #[serde(skip_serializing_if = "Option::is_none")] + pub precision_recall_tradeoff: Option, + ///

The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
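Both tradeoff knobs above are plain floats in [0.0, 1.0]; here is a sketch of biasing a transform toward precision while balancing accuracy against cost (the column name is a placeholder):

```rust
use rusoto_glue::FindMatchesParameters;

fn main() {
    // Per the docs above: 1.0 biases purely toward precision/accuracy,
    // 0.0 biases toward recall/cost, 0.5 is the neutral setting.
    let params = FindMatchesParameters {
        precision_recall_tradeoff: Some(0.9), // favor precision
        accuracy_cost_tradeoff: Some(0.5),    // balance accuracy and cost
        enforce_provided_labels: Some(false),
        primary_key_column_name: Some("customer_id".to_string()), // placeholder
    };
    println!("{:?}", params);
}
```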

+ #[serde(rename = "PrimaryKeyColumnName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub primary_key_column_name: Option, +} + +///

Specifies configuration properties for a Find Matches task run.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct FindMatchesTaskRunProperties { + ///

The job ID for the Find Matches task run.

+ #[serde(rename = "JobId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_id: Option, + ///

The name assigned to the job for the Find Matches task run.

+ #[serde(rename = "JobName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_name: Option, + ///

The job run ID for the Find Matches task run.

+ #[serde(rename = "JobRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_run_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetCatalogImportStatusRequest { ///

The ID of the catalog to migrate. Currently, this should be the AWS account ID.

@@ -1720,7 +2108,7 @@ pub struct GetCatalogImportStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCatalogImportStatusResponse { ///

The status of the specified catalog migration.

#[serde(rename = "ImportStatus")] @@ -1736,7 +2124,7 @@ pub struct GetClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetClassifierResponse { ///

The requested classifier.

#[serde(rename = "Classifier")] @@ -1757,7 +2145,7 @@ pub struct GetClassifiersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetClassifiersResponse { ///

The requested list of classifier objects.

#[serde(rename = "Classifiers")] @@ -1775,7 +2163,7 @@ pub struct GetConnectionRequest { #[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, - ///

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but does have permission to access the rest of the connection properties.

+ ///

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.

#[serde(rename = "HidePassword")] #[serde(skip_serializing_if = "Option::is_none")] pub hide_password: Option, @@ -1785,7 +2173,7 @@ pub struct GetConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConnectionResponse { ///

The requested connection definition.

#[serde(rename = "Connection")] @@ -1812,11 +2200,11 @@ pub struct GetConnectionsRequest { #[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, - ///

A filter that controls which connections will be returned.

+ ///

A filter that controls which connections are returned.

#[serde(rename = "Filter")] #[serde(skip_serializing_if = "Option::is_none")] pub filter: Option, - ///

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but does have permission to access the rest of the connection properties.

+ ///

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.

#[serde(rename = "HidePassword")] #[serde(skip_serializing_if = "Option::is_none")] pub hide_password: Option, @@ -1831,7 +2219,7 @@ pub struct GetConnectionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConnectionsResponse { ///

A list of requested connection definitions.

#[serde(rename = "ConnectionList")] @@ -1860,7 +2248,7 @@ pub struct GetCrawlerMetricsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCrawlerMetricsResponse { ///

A list of metrics for the specified crawler.

#[serde(rename = "CrawlerMetricsList")] @@ -1880,7 +2268,7 @@ pub struct GetCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCrawlerResponse { ///

The metadata for the specified crawler.

#[serde(rename = "Crawler")] @@ -1901,7 +2289,7 @@ pub struct GetCrawlersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCrawlersResponse { ///

A list of crawler metadata.

#[serde(rename = "Crawlers")] @@ -1915,14 +2303,14 @@ pub struct GetCrawlersResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDataCatalogEncryptionSettingsRequest { - ///

The ID of the Data Catalog for which to retrieve the security configuration. If none is provided, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog to retrieve the security configuration for. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataCatalogEncryptionSettingsResponse { ///

The requested security configuration.

#[serde(rename = "DataCatalogEncryptionSettings")] @@ -1932,7 +2320,7 @@ pub struct GetDataCatalogEncryptionSettingsResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDatabaseRequest { - ///

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1942,9 +2330,9 @@ pub struct GetDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDatabaseResponse { - ///

The definition of the specified database in the catalog.

+ ///

The definition of the specified database in the Data Catalog.

#[serde(rename = "Database")] #[serde(skip_serializing_if = "Option::is_none")] pub database: Option, @@ -1952,7 +2340,7 @@ pub struct GetDatabaseResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDatabasesRequest { - ///

The ID of the Data Catalog from which to retrieve Databases. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog from which to retrieve Databases. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -1967,7 +2355,7 @@ pub struct GetDatabasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDatabasesResponse { ///

A list of Database objects from the specified catalog.

#[serde(rename = "DatabaseList")] @@ -1987,7 +2375,7 @@ pub struct GetDataflowGraphRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataflowGraphResponse { ///

A list of the edges in the resulting DAG.

#[serde(rename = "DagEdges")] @@ -2001,15 +2389,15 @@ pub struct GetDataflowGraphResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDevEndpointRequest { - ///

Name of the DevEndpoint for which to retrieve information.

+ ///

Name of the DevEndpoint to retrieve information for.

#[serde(rename = "EndpointName")] pub endpoint_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDevEndpointResponse { - ///

A DevEndpoint definition.

+ ///

A DevEndpoint definition.

#[serde(rename = "DevEndpoint")] #[serde(skip_serializing_if = "Option::is_none")] pub dev_endpoint: Option, @@ -2028,18 +2416,38 @@ pub struct GetDevEndpointsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDevEndpointsResponse { - ///

A list of DevEndpoint definitions.

+ ///

A list of DevEndpoint definitions.

#[serde(rename = "DevEndpoints")] #[serde(skip_serializing_if = "Option::is_none")] pub dev_endpoints: Option>, - ///

A continuation token, if not all DevEndpoint definitions have yet been returned.

+ ///

A continuation token, if not all DevEndpoint definitions have yet been returned.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetJobBookmarkRequest { + ///

The name of the job in question.

+ #[serde(rename = "JobName")] + pub job_name: String, + ///

The unique run identifier associated with this job run.

+ #[serde(rename = "RunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetJobBookmarkResponse { + ///

A structure that defines a point that a job can resume processing.
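A sketch of fetching a bookmark with these types, assuming the generated `get_job_bookmark` method (the job name is a placeholder, and leaving run_id as None is assumed to return the latest entry):

```rust
use rusoto_core::Region;
use rusoto_glue::{GetJobBookmarkRequest, Glue, GlueClient};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let req = GetJobBookmarkRequest {
        job_name: "my-etl-job".to_string(), // placeholder
        run_id: None,                       // assumed: omit for the latest bookmark
    };
    match client.get_job_bookmark(req).sync() {
        Ok(resp) => println!("bookmark: {:?}", resp.job_bookmark_entry),
        Err(e) => eprintln!("GetJobBookmark failed: {}", e),
    }
}
```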

+ #[serde(rename = "JobBookmarkEntry")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_bookmark_entry: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetJobRequest { ///

The name of the job definition to retrieve.

@@ -2048,7 +2456,7 @@ pub struct GetJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobResponse { ///

The requested job definition.

#[serde(rename = "Job")] @@ -2071,7 +2479,7 @@ pub struct GetJobRunRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobRunResponse { ///

The requested job-run metadata.

#[serde(rename = "JobRun")] @@ -2095,7 +2503,7 @@ pub struct GetJobRunsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobRunsResponse { ///

A list of job-run metadata objects.

#[serde(rename = "JobRuns")] @@ -2120,7 +2528,7 @@ pub struct GetJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobsResponse { ///

A list of job definitions.

#[serde(rename = "Jobs")] @@ -2133,32 +2541,235 @@ pub struct GetJobsResponse { } #[derive(Default, Debug, Clone, PartialEq, Serialize)] -pub struct GetMappingRequest { - ///

Parameters for the mapping.

- #[serde(rename = "Location")] - #[serde(skip_serializing_if = "Option::is_none")] - pub location: Option, - ///

A list of target tables.

- #[serde(rename = "Sinks")] - #[serde(skip_serializing_if = "Option::is_none")] - pub sinks: Option>, - ///

Specifies the source table.

- #[serde(rename = "Source")] - pub source: CatalogEntry, +pub struct GetMLTaskRunRequest { + ///

The unique identifier of the task run.

+ #[serde(rename = "TaskRunId")] + pub task_run_id: String, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] -pub struct GetMappingResponse { - ///

A list of mappings to the specified targets.

- #[serde(rename = "Mapping")] - pub mapping: Vec, +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMLTaskRunResponse { + ///

The date and time when this task run was completed.

+ #[serde(rename = "CompletedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub completed_on: Option, + ///

The error strings that are associated with the task run.

+ #[serde(rename = "ErrorString")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_string: Option, + ///

The amount of time (in seconds) that the task run consumed resources.

+ #[serde(rename = "ExecutionTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_time: Option, + ///

The date and time when this task run was last modified.

+ #[serde(rename = "LastModifiedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_on: Option, + ///

The names of the log groups that are associated with the task run.

+ #[serde(rename = "LogGroupName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub log_group_name: Option, + ///

The list of properties that are associated with the task run.

+ #[serde(rename = "Properties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub properties: Option, + ///

The date and time when this task run started.

+ #[serde(rename = "StartedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_on: Option, + ///

The status for this task run.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The unique run identifier associated with this run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] -pub struct GetPartitionRequest { - ///

The ID of the Data Catalog where the partition in question resides. If none is supplied, the AWS account ID is used by default.

- #[serde(rename = "CatalogId")] +pub struct GetMLTaskRunsRequest { + ///

The filter criteria, in the TaskRunFilterCriteria structure, for the task run.

+ #[serde(rename = "Filter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + ///

The maximum number of results to return.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

A token for pagination of the results. The default is empty.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The sorting criteria, in the TaskRunSortCriteria structure, for the task run.

+ #[serde(rename = "Sort")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sort: Option, + ///

The unique identifier of the machine learning transform.
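The NextToken/MaxResults pair follows the usual AWS pagination pattern. A sketch that filters to failed task runs and pages until the token runs out, assuming a `status` field on TaskRunFilterCriteria, the `task_run_id`/`status` fields on the generated TaskRun type, and the generated `get_ml_task_runs` method (the transform ID is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_glue::{GetMLTaskRunsRequest, Glue, GlueClient, TaskRunFilterCriteria};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let req = GetMLTaskRunsRequest {
            transform_id: "tfm-0123456789abcdef".to_string(), // placeholder
            filter: Some(TaskRunFilterCriteria {
                status: Some("FAILED".to_string()), // only failed task runs
                ..Default::default()
            }),
            next_token: next_token.clone(),
            ..Default::default()
        };
        match client.get_ml_task_runs(req).sync() {
            Ok(resp) => {
                for run in resp.task_runs.unwrap_or_default() {
                    println!("{:?} -> {:?}", run.task_run_id, run.status);
                }
                next_token = resp.next_token;
                if next_token.is_none() {
                    break; // no more pages
                }
            }
            Err(e) => {
                eprintln!("GetMLTaskRuns failed: {}", e);
                break;
            }
        }
    }
}
```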

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMLTaskRunsResponse { + ///

A pagination token, if more results are available.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A list of task runs that are associated with the transform.

+ #[serde(rename = "TaskRuns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_runs: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMLTransformRequest { + ///

The unique identifier of the transform, generated at the time that the transform was created.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMLTransformResponse { + ///

The date and time when the transform was created.

+ #[serde(rename = "CreatedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_on: Option, + ///

A description of the transform.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

The latest evaluation metrics.

+ #[serde(rename = "EvaluationMetrics")] + #[serde(skip_serializing_if = "Option::is_none")] + pub evaluation_metrics: Option, + ///

A list of AWS Glue table definitions used by the transform.

+ #[serde(rename = "InputRecordTables")] + #[serde(skip_serializing_if = "Option::is_none")] + pub input_record_tables: Option>, + ///

The number of labels available for this transform.

+ #[serde(rename = "LabelCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub label_count: Option, + ///

The date and time when the transform was last modified.

+ #[serde(rename = "LastModifiedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_on: Option, + ///

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

+ #[serde(rename = "MaxCapacity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_capacity: Option, + ///

The maximum number of times to retry a task for this transform after a task run fails.

+ #[serde(rename = "MaxRetries")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_retries: Option, + ///

The unique name given to the transform when it was created.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The number of workers of a defined workerType that are allocated when this task runs.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

The configuration parameters that are specific to the algorithm used.

+ #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option, + ///

The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.

+ #[serde(rename = "Role")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role: Option, + ///

The Map<Column, Type> object that represents the schema that this transform accepts. Has an upper bound of 100 columns.

+ #[serde(rename = "Schema")] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option>, + ///

The last known status of the transform (to indicate whether it can be used or not). One of "NOT_READY", "READY", or "DELETING".

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

+ #[serde(rename = "Timeout")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timeout: Option, + ///

The unique identifier of the transform, generated at the time that the transform was created.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, + ///

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.

  • For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMLTransformsRequest { + ///

The filter transformation criteria.

+ #[serde(rename = "Filter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + ///

The maximum number of results to return.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

A paginated token to offset the results.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

The sorting criteria.

+ #[serde(rename = "Sort")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sort: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMLTransformsResponse { + ///

A pagination token, if more results are available.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A list of machine learning transforms.

+ #[serde(rename = "Transforms")] + pub transforms: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMappingRequest { + ///

Parameters for the mapping.

+ #[serde(rename = "Location")] + #[serde(skip_serializing_if = "Option::is_none")] + pub location: Option, + ///

A list of target tables.

+ #[serde(rename = "Sinks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sinks: Option>, + ///

Specifies the source table.

+ #[serde(rename = "Source")] + pub source: CatalogEntry, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMappingResponse { + ///

A list of mappings to the specified targets.

+ #[serde(rename = "Mapping")] + pub mapping: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetPartitionRequest { + ///

The ID of the Data Catalog where the partition in question resides. If none is provided, the AWS account ID is used by default.

+ #[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, ///

The name of the catalog database where the partition resides.

@@ -2173,7 +2784,7 @@ pub struct GetPartitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPartitionResponse { ///

The requested information, in the form of a Partition object.

#[serde(rename = "Partition")] @@ -2183,14 +2794,14 @@ pub struct GetPartitionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetPartitionsRequest { - ///

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the partitions in question reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, ///

The name of the catalog database where the partitions reside.

#[serde(rename = "DatabaseName")] pub database_name: String, - ///

An expression filtering the partitions to be returned.

The expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.

Operators: The following are the operators that you can use in the Expression API call:

=

Checks if the values of the two operands are equal or not; if yes, then the condition becomes true.

Example: Assume 'variable a' holds 10 and 'variable b' holds 20.

(a = b) is not true.

< >

Checks if the values of two operands are equal or not; if the values are not equal, then the condition becomes true.

Example: (a < > b) is true.

>

Checks if the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.

Example: (a > b) is not true.

<

Checks if the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.

Example: (a < b) is true.

>=

Checks if the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.

Example: (a >= b) is not true.

<=

Checks if the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.

Example: (a <= b) is true.

AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL

Logical operators.

Supported Partition Key Types: The following are the the supported partition keys.

  • string

  • date

  • timestamp

  • int

  • bigint

  • long

  • tinyint

  • smallint

  • decimal

If an invalid type is encountered, an exception is thrown.

The following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING, to be compatible with the catalog partitions.

Sample API Call:

+ ///

An expression that filters the partitions to be returned.

The expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.

Operators: The following are the operators that you can use in the Expression API call:

=

Checks whether the values of the two operands are equal; if yes, then the condition becomes true.

Example: Assume 'variable a' holds 10 and 'variable b' holds 20.

(a = b) is not true.

< >

Checks whether the values of two operands are equal; if the values are not equal, then the condition becomes true.

Example: (a < > b) is true.

>

Checks whether the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.

Example: (a > b) is not true.

<

Checks whether the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.

Example: (a < b) is true.

>=

Checks whether the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.

Example: (a >= b) is not true.

<=

Checks whether the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.

Example: (a <= b) is true.

AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL

Logical operators.

Supported Partition Key Types: The following are the supported partition keys.

  • string

  • date

  • timestamp

  • int

  • bigint

  • long

  • tinyint

  • smallint

  • decimal

If an invalid type is encountered, an exception is thrown.

The following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING, to be compatible with the catalog partitions.

Sample API Call:
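The upstream documentation breaks off at "Sample API Call:" without the sample; as a stand-in, here is a hedged rusoto sketch of such a call, assuming string-typed partition keys named year and month (database, table, and keys are placeholders):

```rust
use rusoto_core::Region;
use rusoto_glue::{GetPartitionsRequest, Glue, GlueClient};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let req = GetPartitionsRequest {
        database_name: "sales_db".to_string(), // placeholder
        table_name: "events".to_string(),      // placeholder
        // A SQL-like WHERE clause over partition keys, per the operator list above.
        expression: Some("year = '2019' AND month IN ('01', '02')".to_string()),
        ..Default::default()
    };
    match client.get_partitions(req).sync() {
        Ok(resp) => println!("{} partitions", resp.partitions.map_or(0, |p| p.len())),
        Err(e) => eprintln!("GetPartitions failed: {}", e),
    }
}
```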

#[serde(rename = "Expression")] #[serde(skip_serializing_if = "Option::is_none")] pub expression: Option, @@ -2212,9 +2823,9 @@ pub struct GetPartitionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPartitionsResponse { - ///

A continuation token, if the returned list of partitions does not does not include the last one.

+ ///

A continuation token, if the returned list of partitions does not include the last one.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, @@ -2247,7 +2858,7 @@ pub struct GetPlanRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPlanResponse { ///

A Python script to perform the mapping.

#[serde(rename = "PythonScript")] @@ -2263,7 +2874,7 @@ pub struct GetPlanResponse { pub struct GetResourcePolicyRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourcePolicyResponse { ///

The date and time at which the policy was created.

#[serde(rename = "CreateTime")] @@ -2291,9 +2902,9 @@ pub struct GetSecurityConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSecurityConfigurationResponse { - ///

The requested security configuration
+ /// The requested security configuration.

#[serde(rename = "SecurityConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub security_configuration: Option, @@ -2312,7 +2923,7 @@ pub struct GetSecurityConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSecurityConfigurationsResponse { ///

A continuation token, if there are more security configurations to return.

#[serde(rename = "NextToken")] @@ -2326,7 +2937,7 @@ pub struct GetSecurityConfigurationsResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTableRequest { - ///

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2339,7 +2950,7 @@ pub struct GetTableRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTableResponse { ///

The Table object that defines the specified table.

#[serde(rename = "Table")] @@ -2349,7 +2960,7 @@ pub struct GetTableResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTableVersionRequest { - ///

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2366,7 +2977,7 @@ pub struct GetTableVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTableVersionResponse { ///

The requested table version.

#[serde(rename = "TableVersion")] @@ -2376,7 +2987,7 @@ pub struct GetTableVersionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTableVersionsRequest { - ///

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2397,7 +3008,7 @@ pub struct GetTableVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTableVersionsResponse { ///

A continuation token, if the list of available versions does not include the last one.

#[serde(rename = "NextToken")] @@ -2411,7 +3022,7 @@ pub struct GetTableVersionsResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTablesRequest { - ///

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2433,7 +3044,7 @@ pub struct GetTablesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTablesResponse { ///

A continuation token, present if the current list segment is not the last.

#[serde(rename = "NextToken")] @@ -2453,7 +3064,7 @@ pub struct GetTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTagsResponse { ///

The requested tags.

#[serde(rename = "Tags")] @@ -2469,7 +3080,7 @@ pub struct GetTriggerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTriggerResponse { ///

The requested trigger definition.

#[serde(rename = "Trigger")] @@ -2494,7 +3105,7 @@ pub struct GetTriggersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTriggersResponse { ///

A continuation token, if not all the requested triggers have yet been returned.

#[serde(rename = "NextToken")] @@ -2508,7 +3119,7 @@ pub struct GetTriggersResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetUserDefinedFunctionRequest { - ///

The ID of the Data Catalog where the function to be retrieved is located. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the function to be retrieved is located. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2521,7 +3132,7 @@ pub struct GetUserDefinedFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserDefinedFunctionResponse { ///

The requested function definition.

#[serde(rename = "UserDefinedFunction")] @@ -2531,7 +3142,7 @@ pub struct GetUserDefinedFunctionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetUserDefinedFunctionsRequest { - ///

The ID of the Data Catalog where the functions to be retrieved are located. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the functions to be retrieved are located. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -2552,7 +3163,7 @@ pub struct GetUserDefinedFunctionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUserDefinedFunctionsResponse { ///

A continuation token, if the list of functions returned does not include the last requested function.

#[serde(rename = "NextToken")] @@ -2564,9 +3175,122 @@ pub struct GetUserDefinedFunctionsResponse { pub user_defined_functions: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetWorkflowRequest { + ///
<p>Specifies whether to include a graph when returning the workflow resource metadata.</p>
+    #[serde(rename = "IncludeGraph")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub include_graph: Option<bool>,
+    /// <p>The name of the workflow to retrieve.</p>
+    #[serde(rename = "Name")]
+    pub name: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetWorkflowResponse {
+    /// <p>The resource metadata for the workflow.</p>
+    #[serde(rename = "Workflow")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow: Option<Workflow>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetWorkflowRunPropertiesRequest {
+    /// <p>Name of the workflow which was run.</p>
+    #[serde(rename = "Name")]
+    pub name: String,
+    /// <p>The ID of the workflow run whose run properties should be returned.</p>
+    #[serde(rename = "RunId")]
+    pub run_id: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetWorkflowRunPropertiesResponse {
+    /// <p>The workflow run properties which were set during the specified run.</p>
+    #[serde(rename = "RunProperties")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub run_properties: Option<::std::collections::HashMap<String, String>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetWorkflowRunRequest {
+    /// <p>Specifies whether to include the workflow graph in response or not.</p>
+    #[serde(rename = "IncludeGraph")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub include_graph: Option<bool>,
+    /// <p>Name of the workflow being run.</p>
+    #[serde(rename = "Name")]
+    pub name: String,
+    /// <p>The ID of the workflow run.</p>
+    #[serde(rename = "RunId")]
+    pub run_id: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetWorkflowRunResponse {
+    /// <p>The requested workflow run metadata.</p>
+    #[serde(rename = "Run")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub run: Option<WorkflowRun>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetWorkflowRunsRequest {
+    /// <p>Specifies whether to include the workflow graph in response or not.</p>
+    #[serde(rename = "IncludeGraph")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub include_graph: Option<bool>,
+    /// <p>The maximum number of workflow runs to be included in the response.</p>
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>Name of the workflow whose metadata of runs should be returned.</p>
+    #[serde(rename = "Name")]
+    pub name: String,
+    /// <p>The maximum size of the response.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetWorkflowRunsResponse {
+    /// <p>A continuation token, if not all requested workflow runs have been returned.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>A list of workflow run metadata objects.</p>
+    #[serde(rename = "Runs")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub runs: Option<Vec<WorkflowRun>>,
+}
+
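GetWorkflowRuns follows the same NextToken pagination contract as the other list calls in this file. A minimal sketch of draining all pages, assuming the generated `Glue` trait exposes `get_workflow_runs` and the crate's pre-async `RusotoFuture::sync()` call style; the helper function name is illustrative:

```rust
use rusoto_glue::{GetWorkflowRunsRequest, Glue, GlueClient, WorkflowRun};

// Collect every run of one workflow by following NextToken until it is None.
fn all_workflow_runs(
    client: &GlueClient,
    workflow: &str,
) -> Result<Vec<WorkflowRun>, Box<dyn std::error::Error>> {
    let mut runs = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let resp = client
            .get_workflow_runs(GetWorkflowRunsRequest {
                name: workflow.to_owned(),
                next_token: next_token.take(),
                ..Default::default()
            })
            .sync()?; // assumed call style; see lead-in above
        runs.extend(resp.runs.unwrap_or_default());
        next_token = resp.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(runs)
}
```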
+/// <p>The database and table in the AWS Glue Data Catalog that is used for input or output data.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GlueTable { + ///
<p>A unique identifier for the AWS Glue Data Catalog.</p>
+    #[serde(rename = "CatalogId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub catalog_id: Option<String>,
+    /// <p>The name of the connection to the AWS Glue Data Catalog.</p>
+    #[serde(rename = "ConnectionName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub connection_name: Option<String>,
+    /// <p>A database name in the AWS Glue Data Catalog.</p>
+    #[serde(rename = "DatabaseName")]
+    pub database_name: String,
+    /// <p>A table name in the AWS Glue Data Catalog.</p>
+    #[serde(rename = "TableName")]
+    pub table_name: String,
+}
+
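`GlueTable` is a plain input shape: the two required catalog coordinates are non-optional fields, and the rest can be left to `Default`. A small illustrative construction (the database and table names are placeholders):

```rust
use rusoto_glue::GlueTable;

fn main() {
    // Only DatabaseName and TableName are required; CatalogId and
    // ConnectionName fall back to their defaults when omitted.
    let input_table = GlueTable {
        database_name: "sales_db".to_owned(),
        table_name: "transactions".to_owned(),
        ..Default::default()
    };
    println!("{:?}", input_table);
}
```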
 /// <p>A classifier that uses grok patterns.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GrokClassifier { ///

An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.

#[serde(rename = "Classification")] @@ -2604,9 +3328,23 @@ pub struct ImportCatalogToGlueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportCatalogToGlueResponse {} +///
<p>Specifies configuration properties for an importing labels task run.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ImportLabelsTaskRunProperties {
+    /// <p>The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.</p>
+    #[serde(rename = "InputS3Path")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_s3_path: Option<String>,
+    /// <p>Indicates whether to overwrite your existing labels.</p>
+    #[serde(rename = "Replace")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub replace: Option<bool>,
+}
+
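Like every response-only shape in this diff, this struct now derives `Serialize` under `cfg(any(test, feature = "serialize_structs"))` instead of `cfg(test)` alone, which is the recurring `cfg_attr` change here. A sketch of what that enables downstream, assuming `serialize_structs` is turned on in `Cargo.toml` (e.g. `rusoto_glue = { version = "*", features = ["serialize_structs"] }`):

```rust
// Compiles only when the crate is built with the serialize_structs feature,
// because Serialize is derived conditionally on it.
#[cfg(feature = "serialize_structs")]
fn dump_properties(
    props: &rusoto_glue::ImportLabelsTaskRunProperties,
) -> serde_json::Result<String> {
    serde_json::to_string_pretty(props)
}
```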
 /// <p>Specifies a JDBC data store to crawl.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct JdbcTarget { @@ -2626,7 +3364,7 @@ pub struct JdbcTarget { ///

Specifies a job definition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Job { ///

The JobCommand that executes this job.

#[serde(rename = "Command")] @@ -2652,6 +3390,10 @@ pub struct Job { #[serde(rename = "ExecutionProperty")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_property: Option, + ///

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

Jobs that are created without specifying a Glue version default to Glue 0.9.

+ #[serde(rename = "GlueVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub glue_version: Option, ///

The last point in time when this job definition was modified.

#[serde(rename = "LastModifiedOn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2698,9 +3440,9 @@ pub struct Job { pub worker_type: Option, } -///
<p>Defines a point which a job can resume processing.</p>
+/// <p>Defines a point that a job can resume processing.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobBookmarkEntry { ///

The attempt ID number.

#[serde(rename = "Attempt")] @@ -2710,28 +3452,36 @@ pub struct JobBookmarkEntry { #[serde(rename = "JobBookmark")] #[serde(skip_serializing_if = "Option::is_none")] pub job_bookmark: Option, - ///

Name of the job in question.

+ ///

The name of the job in question.

#[serde(rename = "JobName")] #[serde(skip_serializing_if = "Option::is_none")] pub job_name: Option, + ///

The unique run identifier associated with the previous job run.

+ #[serde(rename = "PreviousRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub previous_run_id: Option, ///

The run ID number.

#[serde(rename = "Run")] #[serde(skip_serializing_if = "Option::is_none")] pub run: Option, - ///

Version of the job.

+ ///

The run ID number.

+ #[serde(rename = "RunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option, + ///

The version of the job.

#[serde(rename = "Version")] #[serde(skip_serializing_if = "Option::is_none")] pub version: Option, } -///
<p>Specifies how Job bookmark data should be encrypted.</p>
+/// <p>Specifies how job bookmark data should be encrypted.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct JobBookmarksEncryption { - ///
<p>The encryption mode to use for Job bookmarks data.</p>
+    /// <p>The encryption mode to use for job bookmarks data.</p>
#[serde(rename = "JobBookmarksEncryptionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub job_bookmarks_encryption_mode: Option, - ///
<p>The AWS ARN of the KMS key to be used to encrypt the data.</p>
+    /// <p>The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.</p>
#[serde(rename = "KmsKeyArn")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_arn: Option, @@ -2748,15 +3498,25 @@ pub struct JobCommand { #[serde(rename = "PythonVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub python_version: Option, - ///
<p>Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job (required).</p>
+    /// <p>Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.</p>
     #[serde(rename = "ScriptLocation")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub script_location: Option<String>,
 }
+
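`JobCommand` now carries the Python version alongside the script location. A minimal construction sketch (bucket and script paths are placeholders; any remaining fields are left to `Default`):

```rust
use rusoto_glue::JobCommand;

fn main() {
    // A Spark ETL job command: PythonVersion selects the Python runtime,
    // ScriptLocation points at the job script in Amazon S3.
    let command = JobCommand {
        python_version: Some("3".to_owned()),
        script_location: Some("s3://my-bucket/scripts/job.py".to_owned()),
        ..Default::default()
    };
    println!("{:?}", command);
}
```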
+/// <p>The details of a Job node present in the workflow.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct JobNodeDetails { + ///

The information for the job runs represented by the job node.

+ #[serde(rename = "JobRuns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_runs: Option>, +} + ///

Contains information about a job run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobRun { ///

The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

#[serde(rename = "Arguments")] @@ -2778,6 +3538,10 @@ pub struct JobRun { #[serde(rename = "ExecutionTime")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_time: Option, + ///

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

Jobs that are created without specifying a Glue version default to Glue 0.9.

+ #[serde(rename = "GlueVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub glue_version: Option, ///

The ID of this job run.

#[serde(rename = "Id")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2863,6 +3627,10 @@ pub struct JobUpdate { #[serde(rename = "ExecutionProperty")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_property: Option, + ///

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

+ #[serde(rename = "GlueVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub glue_version: Option, ///

This field is reserved for future use.

#[serde(rename = "LogUri")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2903,7 +3671,7 @@ pub struct JobUpdate { ///

A classifier for JSON content.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JsonClassifier { ///

The time that this classifier was registered.

#[serde(rename = "CreationTime")] @@ -2925,9 +3693,19 @@ pub struct JsonClassifier { pub version: Option, } +///

Specifies configuration properties for a labeling set generation task run.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct LabelingSetGenerationTaskRunProperties { + ///

The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.

+ #[serde(rename = "OutputS3Path")] + #[serde(skip_serializing_if = "Option::is_none")] + pub output_s3_path: Option, +} + ///

Status and error information about the most recent crawl.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LastCrawlInfo { ///

If an error occurred, the error information about the last crawl.

#[serde(rename = "ErrorMessage")] @@ -2972,7 +3750,7 @@ pub struct ListCrawlersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCrawlersResponse { ///

The names of all crawlers in the account, or the crawlers with the specified tags.

#[serde(rename = "CrawlerNames")] @@ -3001,7 +3779,7 @@ pub struct ListDevEndpointsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevEndpointsResponse { ///

The names of all the DevEndpoints in the account, or the DevEndpoints with the specified tags.

#[serde(rename = "DevEndpointNames")] @@ -3030,7 +3808,7 @@ pub struct ListJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResponse { ///

The names of all jobs in the account, or the jobs with the specified tags.

#[serde(rename = "JobNames")] @@ -3063,7 +3841,7 @@ pub struct ListTriggersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTriggersResponse { ///

A continuation token, if the returned list does not contain the last metric available.

#[serde(rename = "NextToken")] @@ -3075,6 +3853,31 @@ pub struct ListTriggersResponse { pub trigger_names: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListWorkflowsRequest { + ///
<p>The maximum size of a list to return.</p>
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>A continuation token, if this is a continuation request.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListWorkflowsResponse {
+    /// <p>A continuation token, if not all workflow names have been returned.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>List of names of workflows in the account.</p>
+    #[serde(rename = "Workflows")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflows: Option<Vec<String>>,
+}
+
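`ListWorkflows` pairs naturally with `GetWorkflow` above: list the names, then fetch each workflow's metadata, optionally with its graph. A sketch under the same assumed `.sync()` call style; the region and page size are illustrative:

```rust
use rusoto_core::Region;
use rusoto_glue::{GetWorkflowRequest, Glue, GlueClient, ListWorkflowsRequest};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlueClient::new(Region::UsEast1);
    // First page of workflow names; follow next_token for the rest.
    let listed = client
        .list_workflows(ListWorkflowsRequest {
            max_results: Some(25),
            ..Default::default()
        })
        .sync()?;
    for name in listed.workflows.unwrap_or_default() {
        // Fetch the full resource metadata, including the node/edge graph.
        let wf = client
            .get_workflow(GetWorkflowRequest {
                name: name.clone(),
                include_graph: Some(true),
            })
            .sync()?;
        println!("{}: has metadata = {}", name, wf.workflow.is_some());
    }
    Ok(())
}
```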
 /// <p>The location of resources.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Location { @@ -3092,40 +3895,144 @@ pub struct Location { pub s3: Option>, } -///

Defines a mapping.

-#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MappingEntry { - ///

The source path.

- #[serde(rename = "SourcePath")] +///

A structure for a machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct MLTransform { + ///

A timestamp. The time and date that this machine learning transform was created.

+ #[serde(rename = "CreatedOn")] #[serde(skip_serializing_if = "Option::is_none")] - pub source_path: Option, - ///

The name of the source table.

- #[serde(rename = "SourceTable")] + pub created_on: Option, + ///

A user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time.

+ #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] - pub source_table: Option, - ///

The source type.

- #[serde(rename = "SourceType")] + pub description: Option, + ///

An EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform.

+ #[serde(rename = "EvaluationMetrics")] #[serde(skip_serializing_if = "Option::is_none")] - pub source_type: Option, - ///

The target path.

- #[serde(rename = "TargetPath")] + pub evaluation_metrics: Option, + ///

A list of AWS Glue table definitions used by the transform.

+ #[serde(rename = "InputRecordTables")] #[serde(skip_serializing_if = "Option::is_none")] - pub target_path: Option, - ///

The target table.

- #[serde(rename = "TargetTable")] + pub input_record_tables: Option>, + ///

A count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file.

+ #[serde(rename = "LabelCount")] #[serde(skip_serializing_if = "Option::is_none")] - pub target_table: Option, - ///

The target type.

- #[serde(rename = "TargetType")] + pub label_count: Option, + ///

A timestamp. The last point in time when this machine learning transform was modified.

+ #[serde(rename = "LastModifiedOn")] #[serde(skip_serializing_if = "Option::is_none")] - pub target_type: Option, -} - -///

Specifies configuration properties of a notification.

-#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct NotificationProperty { - ///

After a job run starts, the number of minutes to wait before sending a job run delay notification.

- #[serde(rename = "NotifyDelayAfter")] + pub last_modified_on: Option, + ///

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

+ #[serde(rename = "MaxCapacity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_capacity: Option, + ///

The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.

+ #[serde(rename = "MaxRetries")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_retries: Option, + ///

A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The number of workers of a defined workerType that are allocated when a task of the transform runs.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

A TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precision vs. recall, or accuracy vs. cost).

+ #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option, + ///

The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.

+ #[serde(rename = "Role")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role: Option, + ///

A map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns.

+ #[serde(rename = "Schema")] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option>, + ///

The current status of the machine learning transform.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The timeout in minutes of the machine learning transform.

+ #[serde(rename = "Timeout")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timeout: Option, + ///

The unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, + ///
<p>The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.</p> <ul> <li> <p>For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p> </li> <li> <p>For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.</p> </li> <li> <p>For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.</p> </li> </ul>
+    #[serde(rename = "WorkerType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub worker_type: Option<String>,
+}
+
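`MLTransform` is response-side metadata; the sketch below just reads the fields defined above (name, status, and the declared schema columns), leaving the surrounding Get call aside:

```rust
use rusoto_glue::MLTransform;

// Print a one-line summary of a transform plus its declared schema.
fn describe_transform(t: &MLTransform) {
    let name = t.name.as_ref().map(String::as_str).unwrap_or("<unnamed>");
    let status = t.status.as_ref().map(String::as_str).unwrap_or("UNKNOWN");
    println!("transform {} status={}", name, status);
    if let Some(schema) = &t.schema {
        for col in schema {
            println!("  column {:?}: {:?}", col.name, col.data_type);
        }
    }
}
```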
+/// <p>Defines a mapping.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MappingEntry { + ///

The source path.

+ #[serde(rename = "SourcePath")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_path: Option, + ///

The name of the source table.

+ #[serde(rename = "SourceTable")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_table: Option, + ///

The source type.

+ #[serde(rename = "SourceType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub source_type: Option, + ///

The target path.

+ #[serde(rename = "TargetPath")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_path: Option, + ///

The target table.

+ #[serde(rename = "TargetTable")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_table: Option, + ///

The target type.

+ #[serde(rename = "TargetType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_type: Option, +} + +///

A node represents an AWS Glue component (such as a trigger, crawler, or job) that is part of a workflow.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Node { + ///

Details of the crawler when the node represents a crawler.

+ #[serde(rename = "CrawlerDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crawler_details: Option, + ///

Details of the Job when the node represents a Job.

+ #[serde(rename = "JobDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub job_details: Option, + ///

The name of the AWS Glue component represented by the node.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

Details of the Trigger when the node represents a Trigger.

+ #[serde(rename = "TriggerDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub trigger_details: Option, + ///

The type of AWS Glue component represented by the node.

+ #[serde(rename = "Type")] + #[serde(skip_serializing_if = "Option::is_none")] + pub type_: Option, + ///
<p>The unique Id assigned to the node within the workflow.</p>
+    #[serde(rename = "UniqueId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unique_id: Option<String>,
+}
+
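When a workflow is fetched with `IncludeGraph`, each graph node arrives as one of these variants, with `Type` indicating which details field is populated. A dispatch sketch; the type strings `"CRAWLER"`, `"JOB"`, and `"TRIGGER"` mirror the service model and should be treated as assumptions here:

```rust
use rusoto_glue::Node;

// Dispatch on the node type and report what the node represents.
fn describe_node(node: &Node) {
    match node.type_.as_ref().map(String::as_str) {
        Some("JOB") => {
            let runs = node
                .job_details
                .as_ref()
                .and_then(|d| d.job_runs.as_ref())
                .map(|r| r.len())
                .unwrap_or(0);
            println!("job node {:?}: {} run(s)", node.name, runs);
        }
        Some("CRAWLER") => println!("crawler node {:?}", node.name),
        Some("TRIGGER") => println!("trigger node {:?}", node.name),
        other => println!("node {:?} of type {:?}", node.name, other),
    }
}
```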
+/// <p>Specifies configuration properties of a notification.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NotificationProperty { + ///

After a job run starts, the number of minutes to wait before sending a job run delay notification.

+ #[serde(rename = "NotifyDelayAfter")] #[serde(skip_serializing_if = "Option::is_none")] pub notify_delay_after: Option, } @@ -3143,13 +4050,13 @@ pub struct Order { ///

Represents a slice of table data.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Partition { ///

The time at which the partition was created.

#[serde(rename = "CreationTime")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_time: Option, - ///

The name of the catalog database where the table in question is located.

+ ///

The name of the catalog database in which to create the partition.

#[serde(rename = "DatabaseName")] #[serde(skip_serializing_if = "Option::is_none")] pub database_name: Option, @@ -3169,7 +4076,7 @@ pub struct Partition { #[serde(rename = "StorageDescriptor")] #[serde(skip_serializing_if = "Option::is_none")] pub storage_descriptor: Option, - ///

The name of the table in question.

+ ///

The name of the database table in which to create the partition.

#[serde(rename = "TableName")] #[serde(skip_serializing_if = "Option::is_none")] pub table_name: Option, @@ -3181,9 +4088,9 @@ pub struct Partition { ///

Contains information about a partition error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PartitionError { - ///

Details about the partition error.

+ ///

The details about the partition error.

#[serde(rename = "ErrorDetail")] #[serde(skip_serializing_if = "Option::is_none")] pub error_detail: Option, @@ -3193,7 +4100,7 @@ pub struct PartitionError { pub partition_values: Option>, } -///

The structure used to create and update a partion.

+///

The structure used to create and update a partition.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PartitionInput { ///

The last time at which the partition was accessed.

@@ -3245,7 +4152,7 @@ pub struct PhysicalConnectionRequirements { ///

A job run that was used in the predicate of a conditional trigger that triggered this job run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Predecessor { ///

The name of the job definition used by the predecessor job run.

#[serde(rename = "JobName")] @@ -3270,9 +4177,39 @@ pub struct Predicate { pub logical: Option, } +///
<p>Permissions granted to a principal.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PrincipalPermissions { + ///

The permissions that are granted to the principal.

+ #[serde(rename = "Permissions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub permissions: Option>, + ///

The principal who is granted permissions.

+ #[serde(rename = "Principal")] + #[serde(skip_serializing_if = "Option::is_none")] + pub principal: Option, +} + +///
<p>Defines a property predicate.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PropertyPredicate { + ///

The comparator used to compare this property to others.

+ #[serde(rename = "Comparator")] + #[serde(skip_serializing_if = "Option::is_none")] + pub comparator: Option, + ///

The key of the property.

+ #[serde(rename = "Key")] + #[serde(skip_serializing_if = "Option::is_none")] + pub key: Option, + ///

The value of the property.

+ #[serde(rename = "Value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PutDataCatalogEncryptionSettingsRequest { - ///

The ID of the Data Catalog for which to set the security configuration. If none is provided, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -3282,7 +4219,7 @@ pub struct PutDataCatalogEncryptionSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutDataCatalogEncryptionSettingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3301,7 +4238,7 @@ pub struct PutResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutResourcePolicyResponse { ///

A hash of the policy that has just been set. This must be included in a subsequent call that overwrites or updates this policy.

#[serde(rename = "PolicyHash")] @@ -3309,15 +4246,36 @@ pub struct PutResourcePolicyResponse { pub policy_hash: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutWorkflowRunPropertiesRequest { + ///

Name of the workflow which was run.

+ #[serde(rename = "Name")] + pub name: String, + ///

The ID of the workflow run for which the run properties should be updated.

+ #[serde(rename = "RunId")] + pub run_id: String, + ///

The properties to put for the specified run.

+ #[serde(rename = "RunProperties")] + pub run_properties: ::std::collections::HashMap, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct PutWorkflowRunPropertiesResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ResetJobBookmarkRequest { ///

The name of the job in question.

#[serde(rename = "JobName")] pub job_name: String, + ///

The unique run identifier associated with this job run.

+ #[serde(rename = "RunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResetJobBookmarkResponse { ///

The reset bookmark entry.

#[serde(rename = "JobBookmarkEntry")] @@ -3325,7 +4283,7 @@ pub struct ResetJobBookmarkResponse { pub job_bookmark_entry: Option, } -///

URIs for function resources.

+///

The URIs for function resources.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ResourceUri { ///

The type of the resource.

@@ -3338,14 +4296,14 @@ pub struct ResourceUri { pub uri: Option, } -///

Specifies how S3 data should be encrypted.

+///

Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct S3Encryption { - ///

The AWS ARN of the KMS key to be used to encrypt the data.

+ ///

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

#[serde(rename = "KmsKeyArn")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_arn: Option, - ///

The encryption mode to use for S3 data.

+ ///

The encryption mode to use for Amazon S3 data.

#[serde(rename = "S3EncryptionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub s3_encryption_mode: Option, @@ -3366,7 +4324,7 @@ pub struct S3Target { ///

A scheduling object using a cron statement to schedule an event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Schedule { ///

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

#[serde(rename = "ScheduleExpression")] @@ -3391,9 +4349,63 @@ pub struct SchemaChangePolicy { pub update_behavior: Option, } +///

A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SchemaColumn { + ///

The type of data in the column.

+ #[serde(rename = "DataType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_type: Option, + ///

The name of the column.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct SearchTablesRequest { + ///

A unique identifier, consisting of account_id/datalake.

+ #[serde(rename = "CatalogId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub catalog_id: Option, + ///

A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.

+ #[serde(rename = "Filters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub filters: Option>, + ///

The maximum number of tables to return in a single response.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

A continuation token, included if this is a continuation call.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A string used for a text search.

Specifying a value in quotes filters based on an exact match to the value.

+ #[serde(rename = "SearchText")] + #[serde(skip_serializing_if = "Option::is_none")] + pub search_text: Option, + ///

A list of criteria for sorting the results by a field name, in an ascending or descending order.

+ #[serde(rename = "SortCriteria")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sort_criteria: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct SearchTablesResponse { + ///

A continuation token, present if the current list segment is not the last.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///
<p>A list of the requested Table objects. The SearchTables response returns only the tables that you have access to.</p>
+    #[serde(rename = "TableList")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub table_list: Option<Vec<Table>>,
+}
+
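`SearchTables` combines free-text search, `PropertyPredicate` filters, and `SortCriterion` ordering, all defined nearby. A sketch of one search under the same assumed `.sync()` style; the key, value, and field names are illustrative rather than a documented vocabulary:

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, PropertyPredicate, SearchTablesRequest, SortCriterion};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GlueClient::new(Region::UsEast1);
    let resp = client
        .search_tables(SearchTablesRequest {
            search_text: Some("clickstream".to_owned()),
            filters: Some(vec![PropertyPredicate {
                key: Some("Owner".to_owned()),
                value: Some("data-eng".to_owned()),
                ..Default::default()
            }]),
            sort_criteria: Some(vec![SortCriterion {
                field_name: Some("UpdateTime".to_owned()),
                sort: Some("DESC".to_owned()),
            }]),
            ..Default::default()
        })
        .sync()?;
    for table in resp.table_list.unwrap_or_default() {
        println!("matched table: {}", table.name);
    }
    Ok(())
}
```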
 /// <p>Specifies a security configuration.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityConfiguration { ///

The time at which this security configuration was created.

#[serde(rename = "CreatedTimeStamp")] @@ -3412,15 +4424,15 @@ pub struct SecurityConfiguration { ///

Defines a non-overlapping region of a table's partitions, allowing multiple requests to be executed in parallel.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Segment { - ///
<p>The zero-based index number of the this segment. For example, if the total number of segments is 4, SegmentNumber values will range from zero through three.</p>
+    /// <p>The zero-based index number of the segment. For example, if the total number of segments is 4, SegmentNumber values range from 0 through 3.</p>
#[serde(rename = "SegmentNumber")] pub segment_number: i64, - ///
<p>The total numer of segments.</p>
+    /// <p>The total number of segments.</p>
     #[serde(rename = "TotalSegments")]
     pub total_segments: i64,
 }
 
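`Segment` shards a partition listing into non-overlapping slices so several `GetPartitions` calls can run in parallel. A sketch that builds the four slices of a 4-way scan; wiring each slice into its own request (or thread) is left out:

```rust
use rusoto_glue::Segment;

fn main() {
    let total_segments: i64 = 4;
    // Segment numbers are zero-based: 0 through 3 for a 4-way scan.
    let segments: Vec<Segment> = (0..total_segments)
        .map(|segment_number| Segment {
            segment_number,
            total_segments,
        })
        .collect();
    assert_eq!(segments.len(), 4);
}
```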
-/// <p>Information about a serialization/deserialization program (SerDe) which serves as an extractor and loader.</p>
+/// <p>Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SerDeInfo { ///

Name of the SerDe.

@@ -3431,13 +4443,13 @@ pub struct SerDeInfo { #[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, - ///

Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.

+ ///

Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.

#[serde(rename = "SerializationLibrary")] #[serde(skip_serializing_if = "Option::is_none")] pub serialization_library: Option, } -///

Specifies skewed values in a table. Skewed are ones that occur with very high frequency.

+///

Specifies skewed values in a table. Skewed values are those that occur with very high frequency.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SkewedInfo { ///

A list of names of columns that contain skewed values.

@@ -3454,6 +4466,16 @@ pub struct SkewedInfo { pub skewed_column_values: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct SortCriterion { + #[serde(rename = "FieldName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub field_name: Option, + #[serde(rename = "Sort")] + #[serde(skip_serializing_if = "Option::is_none")] + pub sort: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StartCrawlerRequest { ///

Name of the crawler to start.

@@ -3462,7 +4484,7 @@ pub struct StartCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartCrawlerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3473,9 +4495,51 @@ pub struct StartCrawlerScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartCrawlerScheduleResponse {} +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartExportLabelsTaskRunRequest { + ///

The Amazon S3 path where you export the labels.

+ #[serde(rename = "OutputS3Path")] + pub output_s3_path: String, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartExportLabelsTaskRunResponse { + ///

The unique identifier for the task run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartImportLabelsTaskRunRequest { + ///

The Amazon Simple Storage Service (Amazon S3) path from where you import the labels.

+ #[serde(rename = "InputS3Path")] + pub input_s3_path: String, + ///

Indicates whether to overwrite your existing labels.

+ #[serde(rename = "ReplaceAllLabels")] + #[serde(skip_serializing_if = "Option::is_none")] + pub replace_all_labels: Option, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartImportLabelsTaskRunResponse { + ///

The unique identifier for the task run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StartJobRunRequest { ///

The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

@@ -3516,7 +4580,7 @@ pub struct StartJobRunRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartJobRunResponse { ///

The ID assigned to this job run.

#[serde(rename = "JobRunId")] @@ -3524,6 +4588,41 @@ pub struct StartJobRunResponse { pub job_run_id: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartMLEvaluationTaskRunRequest { + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartMLEvaluationTaskRunResponse { + ///

The unique identifier associated with this run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartMLLabelingSetGenerationTaskRunRequest { + ///

The Amazon Simple Storage Service (Amazon S3) path where you generate the labeling set.

+ #[serde(rename = "OutputS3Path")] + pub output_s3_path: String, + ///

The unique identifier of the machine learning transform.

+ #[serde(rename = "TransformId")] + pub transform_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartMLLabelingSetGenerationTaskRunResponse { + ///

The unique run identifier that is associated with this task run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StartTriggerRequest { ///

The name of the trigger to start.

@@ -3532,7 +4631,7 @@ pub struct StartTriggerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartTriggerResponse { ///

The name of the trigger that was started.

#[serde(rename = "Name")] @@ -3540,6 +4639,22 @@ pub struct StartTriggerResponse { pub name: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartWorkflowRunRequest { + ///

The name of the workflow to start.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartWorkflowRunResponse { + ///

An Id for the new run.

+ #[serde(rename = "RunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StopCrawlerRequest { ///

Name of the crawler to stop.

@@ -3548,7 +4663,7 @@ pub struct StopCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopCrawlerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3559,7 +4674,7 @@ pub struct StopCrawlerScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopCrawlerScheduleResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3570,7 +4685,7 @@ pub struct StopTriggerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopTriggerResponse { ///

The name of the trigger that was stopped.

#[serde(rename = "Name")] @@ -3589,7 +4704,7 @@ pub struct StorageDescriptor { #[serde(rename = "Columns")] #[serde(skip_serializing_if = "Option::is_none")] pub columns: Option>, - ///

True if the data in the table is compressed, or False if not.

+ ///

True if the data in the table is compressed, or False if not.

#[serde(rename = "Compressed")] #[serde(skip_serializing_if = "Option::is_none")] pub compressed: Option, @@ -3597,7 +4712,7 @@ pub struct StorageDescriptor { #[serde(rename = "InputFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub input_format: Option, - ///

The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.

+ ///

The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.

#[serde(rename = "Location")] #[serde(skip_serializing_if = "Option::is_none")] pub location: Option, @@ -3609,15 +4724,15 @@ pub struct StorageDescriptor { #[serde(rename = "OutputFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub output_format: Option, - ///

User-supplied properties in key-value form.

+ ///

The user-supplied properties in key-value form.

#[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, - ///

Serialization/deserialization (SerDe) information.

+ ///

The serialization/deserialization (SerDe) information.

#[serde(rename = "SerdeInfo")] #[serde(skip_serializing_if = "Option::is_none")] pub serde_info: Option, - ///

Information about values that appear very frequently in a column (skewed values).

+ ///

The information about values that appear frequently in a column (skewed values).

#[serde(rename = "SkewedInfo")] #[serde(skip_serializing_if = "Option::is_none")] pub skewed_info: Option, @@ -3625,7 +4740,7 @@ pub struct StorageDescriptor { #[serde(rename = "SortColumns")] #[serde(skip_serializing_if = "Option::is_none")] pub sort_columns: Option>, - ///

True if the table data is stored in subdirectories, or False if not.

+ ///

True if the table data is stored in subdirectories, or False if not.

#[serde(rename = "StoredAsSubDirectories")] #[serde(skip_serializing_if = "Option::is_none")] pub stored_as_sub_directories: Option, @@ -3633,36 +4748,40 @@ pub struct StorageDescriptor { ///

Represents a collection of related data organized in columns and rows.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Table { - ///

Time when the table definition was created in the Data Catalog.

+ ///

The time when the table definition was created in the Data Catalog.

#[serde(rename = "CreateTime")] #[serde(skip_serializing_if = "Option::is_none")] pub create_time: Option, - ///

Person or entity who created the table.

+ ///

The person or entity who created the table.

#[serde(rename = "CreatedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub created_by: Option, - ///

Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase.

+ ///

The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.

#[serde(rename = "DatabaseName")] #[serde(skip_serializing_if = "Option::is_none")] pub database_name: Option, - ///

Description of the table.

+ ///

A description of the table.

#[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

Last time the table was accessed. This is usually taken from HDFS, and may not be reliable.

+ ///

Indicates whether the table has been registered with AWS Lake Formation.

+ #[serde(rename = "IsRegisteredWithLakeFormation")] + #[serde(skip_serializing_if = "Option::is_none")] + pub is_registered_with_lake_formation: Option, + ///

The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.

#[serde(rename = "LastAccessTime")] #[serde(skip_serializing_if = "Option::is_none")] pub last_access_time: Option, - ///

Last time column statistics were computed for this table.

+ ///

The last time that column statistics were computed for this table.

#[serde(rename = "LastAnalyzedTime")] #[serde(skip_serializing_if = "Option::is_none")] pub last_analyzed_time: Option, - ///

Name of the table. For Hive compatibility, this must be entirely lowercase.

+ ///

The table name. For Hive compatibility, this must be entirely lowercase.

#[serde(rename = "Name")] pub name: String, - ///

Owner of the table.

+ ///

The owner of the table.

#[serde(rename = "Owner")] #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, @@ -3670,11 +4789,11 @@ pub struct Table { #[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, - ///

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

When creating a table used by Athena, and you do not specify any partitionKeys, you must at least set the value of partitionKeys to an empty list. For example:

"PartitionKeys": []

+ ///

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

When you create a table used by Amazon Athena, and you do not specify any partitionKeys, you must at least set the value of partitionKeys to an empty list. For example:

"PartitionKeys": []

#[serde(rename = "PartitionKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub partition_keys: Option>, - ///

Retention time for this table.

+ ///

The retention time for this table.

#[serde(rename = "Retention")] #[serde(skip_serializing_if = "Option::is_none")] pub retention: Option, @@ -3686,7 +4805,7 @@ pub struct Table { #[serde(rename = "TableType")] #[serde(skip_serializing_if = "Option::is_none")] pub table_type: Option, - ///

Last time the table was updated.

+ ///

The last time that the table was updated.

#[serde(rename = "UpdateTime")] #[serde(skip_serializing_if = "Option::is_none")] pub update_time: Option, @@ -3702,37 +4821,37 @@ pub struct Table { ///

An error record for table operations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TableError { - ///

Detail about the error.

+ ///

The details about the error.

#[serde(rename = "ErrorDetail")] #[serde(skip_serializing_if = "Option::is_none")] pub error_detail: Option, - ///

Name of the table. For Hive compatibility, this must be entirely lowercase.

+ ///

The name of the table. For Hive compatibility, this must be entirely lowercase.

#[serde(rename = "TableName")] #[serde(skip_serializing_if = "Option::is_none")] pub table_name: Option, } -///

Structure used to create or update the table.

+///

A structure used to define a table.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TableInput { - ///

Description of the table.

+ ///

A description of the table.

#[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///

Last time the table was accessed.

+ ///

The last time that the table was accessed.

#[serde(rename = "LastAccessTime")] #[serde(skip_serializing_if = "Option::is_none")] pub last_access_time: Option, - ///

Last time column statistics were computed for this table.

+ ///

The last time that column statistics were computed for this table.

#[serde(rename = "LastAnalyzedTime")] #[serde(skip_serializing_if = "Option::is_none")] pub last_analyzed_time: Option, - ///

Name of the table. For Hive compatibility, this is folded to lowercase when it is stored.

+ ///

The table name. For Hive compatibility, this is folded to lowercase when it is stored.

#[serde(rename = "Name")] pub name: String, - ///

Owner of the table.

+ ///

The table owner.

#[serde(rename = "Owner")] #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, @@ -3740,11 +4859,11 @@ pub struct TableInput { #[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap>, - ///

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

When creating a table used by Athena, and you do not specify any partitionKeys, you must at least set the value of partitionKeys to an empty list. For example:

"PartitionKeys": []

+ ///

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

When you create a table used by Amazon Athena, and you do not specify any partitionKeys, you must at least set the value of partitionKeys to an empty list. For example:

"PartitionKeys": []

#[serde(rename = "PartitionKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub partition_keys: Option>, - ///

Retention time for this table.

+ ///

The retention time for this table.

#[serde(rename = "Retention")] #[serde(skip_serializing_if = "Option::is_none")] pub retention: Option, @@ -3768,9 +4887,9 @@ pub struct TableInput { ///

Specifies a version of a table.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TableVersion { - ///

The table in question

+ ///

The table in question.

#[serde(rename = "Table")] #[serde(skip_serializing_if = "Option::is_none")] pub table: Option, @@ -3782,9 +4901,9 @@ pub struct TableVersion { ///

An error record for table-version operations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TableVersionError { - ///

Detail about the error.

+ ///

The details about the error.

#[serde(rename = "ErrorDetail")] #[serde(skip_serializing_if = "Option::is_none")] pub error_detail: Option, @@ -3809,12 +4928,176 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} +///

The sampling parameters that are associated with the machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TaskRun { + ///

The last point in time that the requested task run was completed.

+ #[serde(rename = "CompletedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub completed_on: Option, + ///

The list of error strings associated with this task run.

+ #[serde(rename = "ErrorString")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_string: Option, + ///

The amount of time (in seconds) that the task run consumed resources.

+ #[serde(rename = "ExecutionTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_time: Option, + ///

The last point in time that the requested task run was updated.

+ #[serde(rename = "LastModifiedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_on: Option, + ///

The names of the log group for secure logging, associated with this task run.

+ #[serde(rename = "LogGroupName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub log_group_name: Option, + ///

Specifies configuration properties associated with this task run.

+ #[serde(rename = "Properties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub properties: Option, + ///

The date and time that this task run started.

+ #[serde(rename = "StartedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_on: Option, + ///

The current status of the requested task run.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The unique identifier for this task run.

+ #[serde(rename = "TaskRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_id: Option, + ///

The unique identifier for the transform.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, +} + +///

The criteria that are used to filter the task runs for the machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TaskRunFilterCriteria { + ///

Filter on task runs started after this date.

+ #[serde(rename = "StartedAfter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_after: Option, + ///

Filter on task runs started before this date.

+ #[serde(rename = "StartedBefore")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_before: Option, + ///

The current status of the task run.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The type of task run.

+ #[serde(rename = "TaskRunType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_run_type: Option, +} + +///

The configuration properties for the task run.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TaskRunProperties { + ///

The configuration properties for an exporting labels task run.

+ #[serde(rename = "ExportLabelsTaskRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub export_labels_task_run_properties: Option, + ///

The configuration properties for a find matches task run.

+ #[serde(rename = "FindMatchesTaskRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub find_matches_task_run_properties: Option, + ///

The configuration properties for an importing labels task run.

+ #[serde(rename = "ImportLabelsTaskRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub import_labels_task_run_properties: Option, + ///

The configuration properties for a labeling set generation task run.

+ #[serde(rename = "LabelingSetGenerationTaskRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub labeling_set_generation_task_run_properties: Option, + ///

The type of task run.

+ #[serde(rename = "TaskType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_type: Option, +} + +///

The sorting criteria that are used to sort the list of task runs for the machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TaskRunSortCriteria { + ///

The column to be used to sort the list of task runs for the machine learning transform.

+ #[serde(rename = "Column")] + pub column: String, + ///

The sort direction to be used to sort the list of task runs for the machine learning transform.

+ #[serde(rename = "SortDirection")] + pub sort_direction: String, +} + +///

The criteria used to filter the machine learning transforms.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TransformFilterCriteria { + ///

The time and date after which the transforms were created.

+ #[serde(rename = "CreatedAfter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_after: Option, + ///

The time and date before which the transforms were created.

+ #[serde(rename = "CreatedBefore")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_before: Option, + ///

Filter on transforms last modified after this date.

+ #[serde(rename = "LastModifiedAfter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_after: Option, + ///

Filter on transforms last modified before this date.

+ #[serde(rename = "LastModifiedBefore")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_before: Option, + ///

A unique transform name that is used to filter the machine learning transforms.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.

+ #[serde(rename = "Schema")] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option>, + ///

Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of "NOT_READY", "READY", or "DELETING".

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The type of machine learning transform that is used to filter the machine learning transforms.

+ #[serde(rename = "TransformType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_type: Option, +} + +///

The algorithm-specific parameters that are associated with the machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TransformParameters { + ///

The parameters for the find matches algorithm.

+ #[serde(rename = "FindMatchesParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub find_matches_parameters: Option, + ///

The type of machine learning transform.

For information about the types of machine learning transforms, see Creating Machine Learning Transforms.

+ #[serde(rename = "TransformType")] + pub transform_type: String, +} + +///

The sorting criteria that are associated with the machine learning transform.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TransformSortCriteria { + ///

The column to be used in the sorting criteria that are associated with the machine learning transform.

+ #[serde(rename = "Column")] + pub column: String, + ///

The sort direction to be used in the sorting criteria that are associated with the machine learning transform.

+ #[serde(rename = "SortDirection")] + pub sort_direction: String, +} + ///

Information about a specific trigger.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Trigger { ///

The actions initiated by this trigger.

#[serde(rename = "Actions")] @@ -3848,10 +5131,24 @@ pub struct Trigger { #[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, + ///

The name of the workflow associated with the trigger.

+ #[serde(rename = "WorkflowName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow_name: Option, } -///

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.

-#[derive(Default, Debug, Clone, PartialEq, Serialize)] +///

The details of a Trigger node present in the workflow.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TriggerNodeDetails { + ///

The information about the trigger represented by the trigger node.

+ #[serde(rename = "Trigger")] + #[serde(skip_serializing_if = "Option::is_none")] + pub trigger: Option, +} + +///

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TriggerUpdate { ///

The actions initiated by this trigger.

#[serde(rename = "Actions")] @@ -3886,7 +5183,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3910,7 +5207,7 @@ pub struct UpdateClassifierRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateClassifierResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3928,7 +5225,7 @@ pub struct UpdateConnectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConnectionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3979,7 +5276,7 @@ pub struct UpdateCrawlerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCrawlerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3994,7 +5291,7 @@ pub struct UpdateCrawlerScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCrawlerScheduleResponse {} ///

Specifies a custom CSV classifier to be updated.

@@ -4031,7 +5328,7 @@ pub struct UpdateCsvClassifierRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDatabaseRequest { - ///

The ID of the Data Catalog in which the metadata database resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog in which the metadata database resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, @@ -4044,46 +5341,46 @@ pub struct UpdateDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDatabaseResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDevEndpointRequest { - ///

The map of arguments to add the map of arguments used to configure the DevEndpoint.

+ ///

The map of arguments to add to the map of arguments used to configure the DevEndpoint.

#[serde(rename = "AddArguments")] #[serde(skip_serializing_if = "Option::is_none")] pub add_arguments: Option<::std::collections::HashMap>, - ///

The list of public keys for the DevEndpoint to use.

+ ///

The list of public keys for the DevEndpoint to use.

#[serde(rename = "AddPublicKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub add_public_keys: Option>, - ///

Custom Python or Java libraries to be loaded in the DevEndpoint.

+ ///

Custom Python or Java libraries to be loaded in the DevEndpoint.

#[serde(rename = "CustomLibraries")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_libraries: Option, - ///

The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint.

+ ///

The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint.

#[serde(rename = "DeleteArguments")] #[serde(skip_serializing_if = "Option::is_none")] pub delete_arguments: Option>, - ///

The list of public keys to be deleted from the DevEndpoint.

+ ///

The list of public keys to be deleted from the DevEndpoint.

#[serde(rename = "DeletePublicKeys")] #[serde(skip_serializing_if = "Option::is_none")] pub delete_public_keys: Option>, - ///

The name of the DevEndpoint to be updated.

+ ///

The name of the DevEndpoint to be updated.

#[serde(rename = "EndpointName")] pub endpoint_name: String, - ///

The public key for the DevEndpoint to use.

+ ///

The public key for the DevEndpoint to use.

#[serde(rename = "PublicKey")] #[serde(skip_serializing_if = "Option::is_none")] pub public_key: Option, - ///

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.

+ ///

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.

#[serde(rename = "UpdateEtlLibraries")] #[serde(skip_serializing_if = "Option::is_none")] pub update_etl_libraries: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDevEndpointResponse {} ///

Specifies a grok classifier to update when passed to UpdateClassifier.

@@ -4117,7 +5414,7 @@ pub struct UpdateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateJobResponse { ///

Returns the name of the updated job definition.

#[serde(rename = "JobName")] @@ -4137,40 +5434,92 @@ pub struct UpdateJsonClassifierRequest { pub name: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateMLTransformRequest { + ///

A description of the transform. The default is an empty string.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

+ #[serde(rename = "MaxCapacity")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_capacity: Option, + ///

The maximum number of times to retry a task for this transform after a task run fails.

+ #[serde(rename = "MaxRetries")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_retries: Option, + ///

The unique name that you gave the transform when you created it.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The number of workers of a defined workerType that are allocated when this task runs.

+ #[serde(rename = "NumberOfWorkers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub number_of_workers: Option, + ///

The configuration parameters that are specific to the transform type (algorithm) used. Conditionally dependent on the transform type.

+ #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option, + ///

The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.

+ #[serde(rename = "Role")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role: Option, + ///

The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

+ #[serde(rename = "Timeout")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timeout: Option, + ///

A unique identifier that was generated when the transform was created.

+ #[serde(rename = "TransformId")] + pub transform_id: String, + ///

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.

  • For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.

+ #[serde(rename = "WorkerType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub worker_type: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateMLTransformResponse { + ///

The unique identifier for the transform that was updated.

+ #[serde(rename = "TransformId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transform_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdatePartitionRequest { - ///

The ID of the Data Catalog where the partition to be updated resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the partition to be updated resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, ///

The name of the catalog database in which the table in question resides.

#[serde(rename = "DatabaseName")] pub database_name: String, - ///

The new partition object to which to update the partition.

+ ///

The new partition object with which to update the partition.

#[serde(rename = "PartitionInput")] pub partition_input: PartitionInput, ///

A list of the values defining the partition.

#[serde(rename = "PartitionValueList")] pub partition_value_list: Vec, - ///

The name of the table where the partition to be updated is located.

+ ///

The name of the table in which the partition to be updated is located.

#[serde(rename = "TableName")] pub table_name: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePartitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateTableRequest { - ///

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, ///

The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.

#[serde(rename = "DatabaseName")] pub database_name: String, - ///

By default, UpdateTable always creates an archived version of the table before updating it. If skipArchive is set to true, however, UpdateTable does not create the archived version.

+ ///

By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version.

#[serde(rename = "SkipArchive")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_archive: Option, @@ -4180,7 +5529,7 @@ pub struct UpdateTableRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTableResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -4194,7 +5543,7 @@ pub struct UpdateTriggerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTriggerResponse { ///

The resulting trigger definition.

#[serde(rename = "Trigger")] @@ -4204,14 +5553,14 @@ pub struct UpdateTriggerResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateUserDefinedFunctionRequest { - ///

The ID of the Data Catalog where the function to be updated is located. If none is supplied, the AWS account ID is used by default.

+ ///

The ID of the Data Catalog where the function to be updated is located. If none is provided, the AWS account ID is used by default.

#[serde(rename = "CatalogId")] #[serde(skip_serializing_if = "Option::is_none")] pub catalog_id: Option, ///

The name of the catalog database where the function to be updated is located.

#[serde(rename = "DatabaseName")] pub database_name: String, - ///

A FunctionInput object that re-defines the function in the Data Catalog.

+ ///

A FunctionInput object that redefines the function in the Data Catalog.

#[serde(rename = "FunctionInput")] pub function_input: UserDefinedFunctionInput, ///

The name of the function.

@@ -4220,9 +5569,33 @@ pub struct UpdateUserDefinedFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserDefinedFunctionResponse {} +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateWorkflowRequest { + ///

A collection of properties to be used as part of each execution of the workflow.

+ #[serde(rename = "DefaultRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub default_run_properties: Option<::std::collections::HashMap>, + ///

The description of the workflow.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

The name of the workflow to be updated.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateWorkflowResponse { + ///

The name of the workflow that was specified in the input.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + ///

Specifies an XML classifier to be updated.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateXMLClassifierRequest { @@ -4241,7 +5614,7 @@ pub struct UpdateXMLClassifierRequest { ///

Represents the equivalent of a Hive user-defined function (UDF) definition.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserDefinedFunction { ///

The Java class that contains the function code.

#[serde(rename = "ClassName")] @@ -4269,7 +5642,7 @@ pub struct UserDefinedFunction { pub resource_uris: Option>, } -///

A structure used to create or updata a user-defined function.

+///

A structure used to create or update a user-defined function.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UserDefinedFunctionInput { ///

The Java class that contains the function code.

@@ -4294,9 +5667,125 @@ pub struct UserDefinedFunctionInput { pub resource_uris: Option>, } +///

A workflow represents a flow in which AWS Glue components should be executed to complete a logical task.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Workflow { + ///

The date and time when the workflow was created.

+ #[serde(rename = "CreatedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub created_on: Option, + ///

A collection of properties to be used as part of each execution of the workflow.

+ #[serde(rename = "DefaultRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub default_run_properties: Option<::std::collections::HashMap>, + ///

A description of the workflow.

+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///

The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.

+ #[serde(rename = "Graph")] + #[serde(skip_serializing_if = "Option::is_none")] + pub graph: Option, + ///

The date and time when the workflow was last modified.

+ #[serde(rename = "LastModifiedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_on: Option, + ///

The information about the last execution of the workflow.

+ #[serde(rename = "LastRun")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run: Option, + ///

The name of the workflow representing the flow.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +///

A workflow graph represents the complete workflow, containing all the AWS Glue components present in the workflow and all the directed connections between them.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct WorkflowGraph { + ///

A list of all the directed connections between the nodes belonging to the workflow.

+ #[serde(rename = "Edges")] + #[serde(skip_serializing_if = "Option::is_none")] + pub edges: Option>, + ///

A list of the AWS Glue components belonging to the workflow, represented as nodes.

+ #[serde(rename = "Nodes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub nodes: Option>, +} + +///

A workflow run is an execution of a workflow that provides all the runtime information.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct WorkflowRun { + ///

The date and time when the workflow run completed.

+ #[serde(rename = "CompletedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub completed_on: Option, + ///

The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.

+ #[serde(rename = "Graph")] + #[serde(skip_serializing_if = "Option::is_none")] + pub graph: Option, + ///

The name of the workflow that was executed.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The date and time when the workflow run was started.

+ #[serde(rename = "StartedOn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub started_on: Option, + ///

The statistics of the run.

+ #[serde(rename = "Statistics")] + #[serde(skip_serializing_if = "Option::is_none")] + pub statistics: Option, + ///

The status of the workflow run.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The ID of this workflow run.

+ #[serde(rename = "WorkflowRunId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow_run_id: Option, + ///

The workflow run properties which were set during the run.

+ #[serde(rename = "WorkflowRunProperties")] + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow_run_properties: Option<::std::collections::HashMap>, +} + +///

Statistics about the workflow run.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct WorkflowRunStatistics { + ///

Total number of Actions which have failed.

+ #[serde(rename = "FailedActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub failed_actions: Option, + ///

Total number of Actions in a running state.

+ #[serde(rename = "RunningActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub running_actions: Option, + ///

Total number of Actions which have stopped.

+ #[serde(rename = "StoppedActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub stopped_actions: Option, + ///

Total number of Actions which have succeeded.

+ #[serde(rename = "SucceededActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub succeeded_actions: Option, + ///

Total number of Actions which timed out.

+ #[serde(rename = "TimeoutActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timeout_actions: Option, + ///

Total number of Actions in the workflow run.

+ #[serde(rename = "TotalActions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub total_actions: Option, +} + ///

A classifier for XML content.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct XMLClassifier { ///

An identifier of the data format that the classifier matches.

#[serde(rename = "Classification")] @@ -4846,6 +6335,51 @@ impl Error for BatchGetTriggersError { } } } +/// Errors returned by BatchGetWorkflows +#[derive(Debug, PartialEq)] +pub enum BatchGetWorkflowsError { + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl BatchGetWorkflowsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServiceException" => { + return RusotoError::Service(BatchGetWorkflowsError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(BatchGetWorkflowsError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(BatchGetWorkflowsError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for BatchGetWorkflowsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for BatchGetWorkflowsError { + fn description(&self) -> &str { + match *self { + BatchGetWorkflowsError::InternalService(ref cause) => cause, + BatchGetWorkflowsError::InvalidInput(ref cause) => cause, + BatchGetWorkflowsError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by BatchStopJobRun #[derive(Debug, PartialEq)] pub enum GlueBatchStopJobRunError { @@ -4893,6 +6427,57 @@ impl Error for GlueBatchStopJobRunError { } } } +/// Errors returned by CancelMLTaskRun +#[derive(Debug, PartialEq)] +pub enum CancelMLTaskRunError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl CancelMLTaskRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(CancelMLTaskRunError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(CancelMLTaskRunError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(CancelMLTaskRunError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(CancelMLTaskRunError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CancelMLTaskRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CancelMLTaskRunError { + fn description(&self) -> &str { + match *self { + CancelMLTaskRunError::EntityNotFound(ref cause) => cause, + CancelMLTaskRunError::InternalService(ref cause) => cause, + CancelMLTaskRunError::InvalidInput(ref cause) => cause, + CancelMLTaskRunError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by CreateClassifier #[derive(Debug, PartialEq)] pub enum CreateClassifierError { @@ -5261,6 +6846,79 @@ impl Error for CreateJobError { } } } +/// Errors returned by CreateMLTransform +#[derive(Debug, PartialEq)] +pub enum CreateMLTransformError { + ///
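Each of these generated error enums arrives wrapped in `RusotoError::Service`, so call sites match two levels deep. A sketch for `CancelMLTaskRunError` (the generic parameter on `RusotoError`, stripped in the rendering above, is the enum itself):

```rust
// Sketch: triage the possible CancelMLTaskRun failures.
fn report_cancel_outcome<T>(result: Result<T, RusotoError<CancelMLTaskRunError>>) {
    match result {
        Ok(_) => println!("cancel request accepted"),
        Err(RusotoError::Service(CancelMLTaskRunError::EntityNotFound(msg))) => {
            eprintln!("task run not found, nothing to cancel: {}", msg)
        }
        Err(RusotoError::Service(CancelMLTaskRunError::OperationTimeout(msg))) => {
            eprintln!("timed out; safe to retry: {}", msg)
        }
        Err(other) => eprintln!("unexpected failure: {:?}", other),
    }
}
```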

Access to a resource was denied.

+ AccessDenied(String), + ///

A resource to be created or added already exists.

+ AlreadyExists(String), + ///

The same unique identifier was associated with two different records.

+ IdempotentParameterMismatch(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), + ///

A resource numerical limit was exceeded.

+ ResourceNumberLimitExceeded(String), +} + +impl CreateMLTransformError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "AccessDeniedException" => { + return RusotoError::Service(CreateMLTransformError::AccessDenied(err.msg)) + } + "AlreadyExistsException" => { + return RusotoError::Service(CreateMLTransformError::AlreadyExists(err.msg)) + } + "IdempotentParameterMismatchException" => { + return RusotoError::Service( + CreateMLTransformError::IdempotentParameterMismatch(err.msg), + ) + } + "InternalServiceException" => { + return RusotoError::Service(CreateMLTransformError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(CreateMLTransformError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(CreateMLTransformError::OperationTimeout(err.msg)) + } + "ResourceNumberLimitExceededException" => { + return RusotoError::Service( + CreateMLTransformError::ResourceNumberLimitExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CreateMLTransformError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CreateMLTransformError { + fn description(&self) -> &str { + match *self { + CreateMLTransformError::AccessDenied(ref cause) => cause, + CreateMLTransformError::AlreadyExists(ref cause) => cause, + CreateMLTransformError::IdempotentParameterMismatch(ref cause) => cause, + CreateMLTransformError::InternalService(ref cause) => cause, + CreateMLTransformError::InvalidInput(ref cause) => cause, + CreateMLTransformError::OperationTimeout(ref cause) => cause, + CreateMLTransformError::ResourceNumberLimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by CreatePartition #[derive(Debug, PartialEq)] pub enum CreatePartitionError { @@ -5524,6 +7182,8 @@ pub enum CreateTriggerError { AlreadyExists(String), ///

Two processes are trying to modify a resource simultaneously.

ConcurrentModification(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), ///

The same unique identifier was associated with two different records.

IdempotentParameterMismatch(String), ///

An internal service error occurred.

@@ -5548,6 +7208,9 @@ impl CreateTriggerError { err.msg, )) } + "EntityNotFoundException" => { + return RusotoError::Service(CreateTriggerError::EntityNotFound(err.msg)) + } "IdempotentParameterMismatchException" => { return RusotoError::Service(CreateTriggerError::IdempotentParameterMismatch( err.msg, @@ -5584,6 +7247,7 @@ impl Error for CreateTriggerError { match *self { CreateTriggerError::AlreadyExists(ref cause) => cause, CreateTriggerError::ConcurrentModification(ref cause) => cause, + CreateTriggerError::EntityNotFound(ref cause) => cause, CreateTriggerError::IdempotentParameterMismatch(ref cause) => cause, CreateTriggerError::InternalService(ref cause) => cause, CreateTriggerError::InvalidInput(ref cause) => cause, @@ -5675,24 +7339,91 @@ impl Error for CreateUserDefinedFunctionError { } } } -/// Errors returned by DeleteClassifier +/// Errors returned by CreateWorkflow #[derive(Debug, PartialEq)] -pub enum DeleteClassifierError { - ///

A specified entity does not exist

- EntityNotFound(String), +pub enum CreateWorkflowError { + ///

A resource to be created or added already exists.

+ AlreadyExists(String), + ///

Two processes are trying to modify a resource simultaneously.

+ ConcurrentModification(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), ///

The operation timed out.

OperationTimeout(String), + ///

A resource numerical limit was exceeded.

+ ResourceNumberLimitExceeded(String), } -impl DeleteClassifierError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl CreateWorkflowError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { - "EntityNotFoundException" => { - return RusotoError::Service(DeleteClassifierError::EntityNotFound(err.msg)) - } - "OperationTimeoutException" => { - return RusotoError::Service(DeleteClassifierError::OperationTimeout(err.msg)) + "AlreadyExistsException" => { + return RusotoError::Service(CreateWorkflowError::AlreadyExists(err.msg)) + } + "ConcurrentModificationException" => { + return RusotoError::Service(CreateWorkflowError::ConcurrentModification( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(CreateWorkflowError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(CreateWorkflowError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(CreateWorkflowError::OperationTimeout(err.msg)) + } + "ResourceNumberLimitExceededException" => { + return RusotoError::Service(CreateWorkflowError::ResourceNumberLimitExceeded( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CreateWorkflowError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CreateWorkflowError { + fn description(&self) -> &str { + match *self { + CreateWorkflowError::AlreadyExists(ref cause) => cause, + CreateWorkflowError::ConcurrentModification(ref cause) => cause, + CreateWorkflowError::InternalService(ref cause) => cause, + CreateWorkflowError::InvalidInput(ref cause) => cause, + CreateWorkflowError::OperationTimeout(ref cause) => cause, + CreateWorkflowError::ResourceNumberLimitExceeded(ref cause) => cause, + } + } +} +/// Errors returned by DeleteClassifier +#[derive(Debug, PartialEq)] +pub enum DeleteClassifierError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl DeleteClassifierError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(DeleteClassifierError::EntityNotFound(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(DeleteClassifierError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -5953,6 +7684,57 @@ impl Error for DeleteJobError { } } } +/// Errors returned by DeleteMLTransform +#[derive(Debug, PartialEq)] +pub enum DeleteMLTransformError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl DeleteMLTransformError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(DeleteMLTransformError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(DeleteMLTransformError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(DeleteMLTransformError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(DeleteMLTransformError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteMLTransformError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteMLTransformError { + fn description(&self) -> &str { + match *self { + DeleteMLTransformError::EntityNotFound(ref cause) => cause, + DeleteMLTransformError::InternalService(ref cause) => cause, + DeleteMLTransformError::InvalidInput(ref cause) => cause, + DeleteMLTransformError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by DeletePartition #[derive(Debug, PartialEq)] pub enum DeletePartitionError { @@ -6342,6 +8124,59 @@ impl Error for DeleteUserDefinedFunctionError { } } } +/// Errors returned by DeleteWorkflow +#[derive(Debug, PartialEq)] +pub enum DeleteWorkflowError { + ///

Two processes are trying to modify a resource simultaneously.

+ ConcurrentModification(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl DeleteWorkflowError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(DeleteWorkflowError::ConcurrentModification( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(DeleteWorkflowError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(DeleteWorkflowError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(DeleteWorkflowError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteWorkflowError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteWorkflowError { + fn description(&self) -> &str { + match *self { + DeleteWorkflowError::ConcurrentModification(ref cause) => cause, + DeleteWorkflowError::InternalService(ref cause) => cause, + DeleteWorkflowError::InvalidInput(ref cause) => cause, + DeleteWorkflowError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by GetCatalogImportStatus #[derive(Debug, PartialEq)] pub enum GetCatalogImportStatusError { @@ -7023,6 +8858,57 @@ impl Error for GetJobError { } } } +/// Errors returned by GetJobBookmark +#[derive(Debug, PartialEq)] +pub enum GetJobBookmarkError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetJobBookmarkError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetJobBookmarkError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetJobBookmarkError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetJobBookmarkError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetJobBookmarkError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetJobBookmarkError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetJobBookmarkError { + fn description(&self) -> &str { + match *self { + GetJobBookmarkError::EntityNotFound(ref cause) => cause, + GetJobBookmarkError::InternalService(ref cause) => cause, + GetJobBookmarkError::InvalidInput(ref cause) => cause, + GetJobBookmarkError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by GetJobRun #[derive(Debug, PartialEq)] pub enum GetJobRunError { @@ -7176,9 +9062,9 @@ impl Error for GetJobsError { } } } -/// Errors returned by GetMapping +/// Errors returned by GetMLTaskRun #[derive(Debug, PartialEq)] -pub enum GetMappingError { +pub enum GetMLTaskRunError { ///

A specified entity does not exist

EntityNotFound(String), ///

An internal service error occurred.

@@ -7189,21 +9075,21 @@ pub enum GetMappingError { OperationTimeout(String), } -impl GetMappingError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetMLTaskRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "EntityNotFoundException" => { - return RusotoError::Service(GetMappingError::EntityNotFound(err.msg)) + return RusotoError::Service(GetMLTaskRunError::EntityNotFound(err.msg)) } "InternalServiceException" => { - return RusotoError::Service(GetMappingError::InternalService(err.msg)) + return RusotoError::Service(GetMLTaskRunError::InternalService(err.msg)) } "InvalidInputException" => { - return RusotoError::Service(GetMappingError::InvalidInput(err.msg)) + return RusotoError::Service(GetMLTaskRunError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(GetMappingError::OperationTimeout(err.msg)) + return RusotoError::Service(GetMLTaskRunError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -7212,28 +9098,26 @@ impl GetMappingError { return RusotoError::Unknown(res); } } -impl fmt::Display for GetMappingError { +impl fmt::Display for GetMLTaskRunError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for GetMappingError { +impl Error for GetMLTaskRunError { fn description(&self) -> &str { match *self { - GetMappingError::EntityNotFound(ref cause) => cause, - GetMappingError::InternalService(ref cause) => cause, - GetMappingError::InvalidInput(ref cause) => cause, - GetMappingError::OperationTimeout(ref cause) => cause, + GetMLTaskRunError::EntityNotFound(ref cause) => cause, + GetMLTaskRunError::InternalService(ref cause) => cause, + GetMLTaskRunError::InvalidInput(ref cause) => cause, + GetMLTaskRunError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by GetPartition +/// Errors returned by GetMLTaskRuns #[derive(Debug, PartialEq)] -pub enum GetPartitionError { +pub enum GetMLTaskRunsError { ///

A specified entity does not exist

EntityNotFound(String), - ///

An encryption operation failed.

- GlueEncryption(String), ///

An internal service error occurred.

InternalService(String), ///

The input provided was not valid.

@@ -7242,24 +9126,21 @@ pub enum GetPartitionError { OperationTimeout(String), } -impl GetPartitionError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetMLTaskRunsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "EntityNotFoundException" => { - return RusotoError::Service(GetPartitionError::EntityNotFound(err.msg)) - } - "GlueEncryptionException" => { - return RusotoError::Service(GetPartitionError::GlueEncryption(err.msg)) + return RusotoError::Service(GetMLTaskRunsError::EntityNotFound(err.msg)) } "InternalServiceException" => { - return RusotoError::Service(GetPartitionError::InternalService(err.msg)) + return RusotoError::Service(GetMLTaskRunsError::InternalService(err.msg)) } "InvalidInputException" => { - return RusotoError::Service(GetPartitionError::InvalidInput(err.msg)) + return RusotoError::Service(GetMLTaskRunsError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(GetPartitionError::OperationTimeout(err.msg)) + return RusotoError::Service(GetMLTaskRunsError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -7268,29 +9149,26 @@ impl GetPartitionError { return RusotoError::Unknown(res); } } -impl fmt::Display for GetPartitionError { +impl fmt::Display for GetMLTaskRunsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for GetPartitionError { +impl Error for GetMLTaskRunsError { fn description(&self) -> &str { match *self { - GetPartitionError::EntityNotFound(ref cause) => cause, - GetPartitionError::GlueEncryption(ref cause) => cause, - GetPartitionError::InternalService(ref cause) => cause, - GetPartitionError::InvalidInput(ref cause) => cause, - GetPartitionError::OperationTimeout(ref cause) => cause, + GetMLTaskRunsError::EntityNotFound(ref cause) => cause, + GetMLTaskRunsError::InternalService(ref cause) => cause, + GetMLTaskRunsError::InvalidInput(ref cause) => cause, + GetMLTaskRunsError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by GetPartitions +/// Errors returned by GetMLTransform #[derive(Debug, PartialEq)] -pub enum GetPartitionsError { +pub enum GetMLTransformError { ///

A specified entity does not exist

EntityNotFound(String), - ///

An encryption operation failed.

- GlueEncryption(String), ///

An internal service error occurred.

InternalService(String), ///

The input provided was not valid.

@@ -7299,24 +9177,21 @@ pub enum GetPartitionsError { OperationTimeout(String), } -impl GetPartitionsError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetMLTransformError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "EntityNotFoundException" => { - return RusotoError::Service(GetPartitionsError::EntityNotFound(err.msg)) - } - "GlueEncryptionException" => { - return RusotoError::Service(GetPartitionsError::GlueEncryption(err.msg)) + return RusotoError::Service(GetMLTransformError::EntityNotFound(err.msg)) } "InternalServiceException" => { - return RusotoError::Service(GetPartitionsError::InternalService(err.msg)) + return RusotoError::Service(GetMLTransformError::InternalService(err.msg)) } "InvalidInputException" => { - return RusotoError::Service(GetPartitionsError::InvalidInput(err.msg)) + return RusotoError::Service(GetMLTransformError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(GetPartitionsError::OperationTimeout(err.msg)) + return RusotoError::Service(GetMLTransformError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -7325,25 +9200,26 @@ impl GetPartitionsError { return RusotoError::Unknown(res); } } -impl fmt::Display for GetPartitionsError { +impl fmt::Display for GetMLTransformError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for GetPartitionsError { +impl Error for GetMLTransformError { fn description(&self) -> &str { match *self { - GetPartitionsError::EntityNotFound(ref cause) => cause, - GetPartitionsError::GlueEncryption(ref cause) => cause, - GetPartitionsError::InternalService(ref cause) => cause, - GetPartitionsError::InvalidInput(ref cause) => cause, - GetPartitionsError::OperationTimeout(ref cause) => cause, + GetMLTransformError::EntityNotFound(ref cause) => cause, + GetMLTransformError::InternalService(ref cause) => cause, + GetMLTransformError::InvalidInput(ref cause) => cause, + GetMLTransformError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by GetPlan +/// Errors returned by GetMLTransforms #[derive(Debug, PartialEq)] -pub enum GetPlanError { +pub enum GetMLTransformsError { + ///

A specified entity does not exist.

+ EntityNotFound(String), ///

An internal service error occurred.

InternalService(String), ///

The input provided was not valid.

@@ -7352,18 +9228,21 @@ pub enum GetPlanError { OperationTimeout(String), } -impl GetPlanError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetMLTransformsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetMLTransformsError::EntityNotFound(err.msg)) + } "InternalServiceException" => { - return RusotoError::Service(GetPlanError::InternalService(err.msg)) + return RusotoError::Service(GetMLTransformsError::InternalService(err.msg)) } "InvalidInputException" => { - return RusotoError::Service(GetPlanError::InvalidInput(err.msg)) + return RusotoError::Service(GetMLTransformsError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(GetPlanError::OperationTimeout(err.msg)) + return RusotoError::Service(GetMLTransformsError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -7372,23 +9251,24 @@ impl GetPlanError { return RusotoError::Unknown(res); } } -impl fmt::Display for GetPlanError { +impl fmt::Display for GetMLTransformsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for GetPlanError { +impl Error for GetMLTransformsError { fn description(&self) -> &str { match *self { - GetPlanError::InternalService(ref cause) => cause, - GetPlanError::InvalidInput(ref cause) => cause, - GetPlanError::OperationTimeout(ref cause) => cause, + GetMLTransformsError::EntityNotFound(ref cause) => cause, + GetMLTransformsError::InternalService(ref cause) => cause, + GetMLTransformsError::InvalidInput(ref cause) => cause, + GetMLTransformsError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by GetResourcePolicy +/// Errors returned by GetMapping #[derive(Debug, PartialEq)] -pub enum GetResourcePolicyError { +pub enum GetMappingError { ///

A specified entity does not exist

EntityNotFound(String), ///

An internal service error occurred.

@@ -7399,21 +9279,21 @@ pub enum GetResourcePolicyError { OperationTimeout(String), } -impl GetResourcePolicyError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetMappingError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { "EntityNotFoundException" => { - return RusotoError::Service(GetResourcePolicyError::EntityNotFound(err.msg)) + return RusotoError::Service(GetMappingError::EntityNotFound(err.msg)) } "InternalServiceException" => { - return RusotoError::Service(GetResourcePolicyError::InternalService(err.msg)) + return RusotoError::Service(GetMappingError::InternalService(err.msg)) } "InvalidInputException" => { - return RusotoError::Service(GetResourcePolicyError::InvalidInput(err.msg)) + return RusotoError::Service(GetMappingError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(GetResourcePolicyError::OperationTimeout(err.msg)) + return RusotoError::Service(GetMappingError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -7422,18 +9302,228 @@ impl GetResourcePolicyError { return RusotoError::Unknown(res); } } -impl fmt::Display for GetResourcePolicyError { +impl fmt::Display for GetMappingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for GetResourcePolicyError { +impl Error for GetMappingError { fn description(&self) -> &str { match *self { - GetResourcePolicyError::EntityNotFound(ref cause) => cause, - GetResourcePolicyError::InternalService(ref cause) => cause, - GetResourcePolicyError::InvalidInput(ref cause) => cause, - GetResourcePolicyError::OperationTimeout(ref cause) => cause, + GetMappingError::EntityNotFound(ref cause) => cause, + GetMappingError::InternalService(ref cause) => cause, + GetMappingError::InvalidInput(ref cause) => cause, + GetMappingError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by GetPartition +#[derive(Debug, PartialEq)] +pub enum GetPartitionError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An encryption operation failed.

+ GlueEncryption(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

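For callers, these typed errors surface inside `RusotoError::Service`. A minimal usage sketch of handling a missing partition, assuming the request struct fields generated in this era of the crate; the database, table, and partition values below are hypothetical:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_glue::{GetPartitionError, GetPartitionRequest, Glue, GlueClient};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let request = GetPartitionRequest {
        database_name: "sales_db".to_owned(),      // hypothetical database
        table_name: "events".to_owned(),           // hypothetical table
        partition_values: vec!["2019".to_owned()], // hypothetical partition key
        ..Default::default()
    };
    match client.get_partition(request).sync() {
        Ok(output) => println!("partition: {:?}", output.partition),
        // Service-side exceptions are mapped onto the generated enum by from_response.
        Err(RusotoError::Service(GetPartitionError::EntityNotFound(msg))) => {
            eprintln!("no such partition: {}", msg)
        }
        // ValidationException becomes RusotoError::Validation; anything
        // unrecognized falls through to RusotoError::Unknown.
        Err(other) => eprintln!("request failed: {}", other),
    }
}
```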
+ OperationTimeout(String), +} + +impl GetPartitionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetPartitionError::EntityNotFound(err.msg)) + } + "GlueEncryptionException" => { + return RusotoError::Service(GetPartitionError::GlueEncryption(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetPartitionError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetPartitionError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetPartitionError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetPartitionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetPartitionError { + fn description(&self) -> &str { + match *self { + GetPartitionError::EntityNotFound(ref cause) => cause, + GetPartitionError::GlueEncryption(ref cause) => cause, + GetPartitionError::InternalService(ref cause) => cause, + GetPartitionError::InvalidInput(ref cause) => cause, + GetPartitionError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by GetPartitions +#[derive(Debug, PartialEq)] +pub enum GetPartitionsError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An encryption operation failed.

+ GlueEncryption(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetPartitionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetPartitionsError::EntityNotFound(err.msg)) + } + "GlueEncryptionException" => { + return RusotoError::Service(GetPartitionsError::GlueEncryption(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetPartitionsError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetPartitionsError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetPartitionsError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetPartitionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetPartitionsError { + fn description(&self) -> &str { + match *self { + GetPartitionsError::EntityNotFound(ref cause) => cause, + GetPartitionsError::GlueEncryption(ref cause) => cause, + GetPartitionsError::InternalService(ref cause) => cause, + GetPartitionsError::InvalidInput(ref cause) => cause, + GetPartitionsError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by GetPlan +#[derive(Debug, PartialEq)] +pub enum GetPlanError { + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetPlanError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServiceException" => { + return RusotoError::Service(GetPlanError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetPlanError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetPlanError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetPlanError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetPlanError { + fn description(&self) -> &str { + match *self { + GetPlanError::InternalService(ref cause) => cause, + GetPlanError::InvalidInput(ref cause) => cause, + GetPlanError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by GetResourcePolicy +#[derive(Debug, PartialEq)] +pub enum GetResourcePolicyError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetResourcePolicyError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetResourcePolicyError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetResourcePolicyError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetResourcePolicyError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetResourcePolicyError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetResourcePolicyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetResourcePolicyError { + fn description(&self) -> &str { + match *self { + GetResourcePolicyError::EntityNotFound(ref cause) => cause, + GetResourcePolicyError::InternalService(ref cause) => cause, + GetResourcePolicyError::InvalidInput(ref cause) => cause, + GetResourcePolicyError::OperationTimeout(ref cause) => cause, } } } @@ -8068,26 +10158,34 @@ impl Error for GetUserDefinedFunctionsError { } } } -/// Errors returned by ImportCatalogToGlue +/// Errors returned by GetWorkflow #[derive(Debug, PartialEq)] -pub enum ImportCatalogToGlueError { +pub enum GetWorkflowError { + ///

A specified entity does not exist.

+ EntityNotFound(String), ///

An internal service error occurred.

InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), ///

The operation timed out.

OperationTimeout(String), } -impl ImportCatalogToGlueError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetWorkflowError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetWorkflowError::EntityNotFound(err.msg)) + } "InternalServiceException" => { - return RusotoError::Service(ImportCatalogToGlueError::InternalService(err.msg)) + return RusotoError::Service(GetWorkflowError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetWorkflowError::InvalidInput(err.msg)) } "OperationTimeoutException" => { - return RusotoError::Service(ImportCatalogToGlueError::OperationTimeout( - err.msg, - )) + return RusotoError::Service(GetWorkflowError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -8096,32 +10194,49 @@ impl ImportCatalogToGlueError { return RusotoError::Unknown(res); } } -impl fmt::Display for ImportCatalogToGlueError { +impl fmt::Display for GetWorkflowError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ImportCatalogToGlueError { +impl Error for GetWorkflowError { fn description(&self) -> &str { match *self { - ImportCatalogToGlueError::InternalService(ref cause) => cause, - ImportCatalogToGlueError::OperationTimeout(ref cause) => cause, + GetWorkflowError::EntityNotFound(ref cause) => cause, + GetWorkflowError::InternalService(ref cause) => cause, + GetWorkflowError::InvalidInput(ref cause) => cause, + GetWorkflowError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by ListCrawlers +/// Errors returned by GetWorkflowRun #[derive(Debug, PartialEq)] -pub enum ListCrawlersError { +pub enum GetWorkflowRunError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), ///

The operation timed out.

OperationTimeout(String), } -impl ListCrawlersError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl GetWorkflowRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetWorkflowRunError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetWorkflowRunError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetWorkflowRunError::InvalidInput(err.msg)) + } "OperationTimeoutException" => { - return RusotoError::Service(ListCrawlersError::OperationTimeout(err.msg)) + return RusotoError::Service(GetWorkflowRunError::OperationTimeout(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -8130,21 +10245,208 @@ impl ListCrawlersError { return RusotoError::Unknown(res); } } -impl fmt::Display for ListCrawlersError { +impl fmt::Display for GetWorkflowRunError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for ListCrawlersError { +impl Error for GetWorkflowRunError { fn description(&self) -> &str { match *self { - ListCrawlersError::OperationTimeout(ref cause) => cause, + GetWorkflowRunError::EntityNotFound(ref cause) => cause, + GetWorkflowRunError::InternalService(ref cause) => cause, + GetWorkflowRunError::InvalidInput(ref cause) => cause, + GetWorkflowRunError::OperationTimeout(ref cause) => cause, } } } -/// Errors returned by ListDevEndpoints +/// Errors returned by GetWorkflowRunProperties #[derive(Debug, PartialEq)] -pub enum ListDevEndpointsError { +pub enum GetWorkflowRunPropertiesError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetWorkflowRunPropertiesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetWorkflowRunPropertiesError::EntityNotFound( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(GetWorkflowRunPropertiesError::InternalService( + err.msg, + )) + } + "InvalidInputException" => { + return RusotoError::Service(GetWorkflowRunPropertiesError::InvalidInput( + err.msg, + )) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetWorkflowRunPropertiesError::OperationTimeout( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetWorkflowRunPropertiesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetWorkflowRunPropertiesError { + fn description(&self) -> &str { + match *self { + GetWorkflowRunPropertiesError::EntityNotFound(ref cause) => cause, + GetWorkflowRunPropertiesError::InternalService(ref cause) => cause, + GetWorkflowRunPropertiesError::InvalidInput(ref cause) => cause, + GetWorkflowRunPropertiesError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by GetWorkflowRuns +#[derive(Debug, PartialEq)] +pub enum GetWorkflowRunsError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl GetWorkflowRunsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(GetWorkflowRunsError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(GetWorkflowRunsError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(GetWorkflowRunsError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(GetWorkflowRunsError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for GetWorkflowRunsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetWorkflowRunsError { + fn description(&self) -> &str { + match *self { + GetWorkflowRunsError::EntityNotFound(ref cause) => cause, + GetWorkflowRunsError::InternalService(ref cause) => cause, + GetWorkflowRunsError::InvalidInput(ref cause) => cause, + GetWorkflowRunsError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by ImportCatalogToGlue +#[derive(Debug, PartialEq)] +pub enum ImportCatalogToGlueError { + ///

An internal service error occurred.

+ InternalService(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl ImportCatalogToGlueError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServiceException" => { + return RusotoError::Service(ImportCatalogToGlueError::InternalService(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(ImportCatalogToGlueError::OperationTimeout( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ImportCatalogToGlueError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ImportCatalogToGlueError { + fn description(&self) -> &str { + match *self { + ImportCatalogToGlueError::InternalService(ref cause) => cause, + ImportCatalogToGlueError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by ListCrawlers +#[derive(Debug, PartialEq)] +pub enum ListCrawlersError { + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl ListCrawlersError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "OperationTimeoutException" => { + return RusotoError::Service(ListCrawlersError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListCrawlersError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListCrawlersError { + fn description(&self) -> &str { + match *self { + ListCrawlersError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by ListDevEndpoints +#[derive(Debug, PartialEq)] +pub enum ListDevEndpointsError { ///

A specified entity does not exist.

EntityNotFound(String), ///

An internal service error occurred.

@@ -8295,6 +10597,51 @@ impl Error for ListTriggersError { } } } +/// Errors returned by ListWorkflows +#[derive(Debug, PartialEq)] +pub enum ListWorkflowsError { + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl ListWorkflowsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServiceException" => { + return RusotoError::Service(ListWorkflowsError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(ListWorkflowsError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(ListWorkflowsError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListWorkflowsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListWorkflowsError { + fn description(&self) -> &str { + match *self { + ListWorkflowsError::InternalService(ref cause) => cause, + ListWorkflowsError::InvalidInput(ref cause) => cause, + ListWorkflowsError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by PutDataCatalogEncryptionSettings #[derive(Debug, PartialEq)] pub enum PutDataCatalogEncryptionSettingsError { @@ -8407,6 +10754,89 @@ impl Error for PutResourcePolicyError { } } } +/// Errors returned by PutWorkflowRunProperties +#[derive(Debug, PartialEq)] +pub enum PutWorkflowRunPropertiesError { + ///

A resource to be created or added already exists.

+ AlreadyExists(String), + ///

Two processes are trying to modify a resource simultaneously.

+ ConcurrentModification(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), + ///

A resource numerical limit was exceeded.

+ ResourceNumberLimitExceeded(String), +} + +impl PutWorkflowRunPropertiesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "AlreadyExistsException" => { + return RusotoError::Service(PutWorkflowRunPropertiesError::AlreadyExists( + err.msg, + )) + } + "ConcurrentModificationException" => { + return RusotoError::Service( + PutWorkflowRunPropertiesError::ConcurrentModification(err.msg), + ) + } + "EntityNotFoundException" => { + return RusotoError::Service(PutWorkflowRunPropertiesError::EntityNotFound( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(PutWorkflowRunPropertiesError::InternalService( + err.msg, + )) + } + "InvalidInputException" => { + return RusotoError::Service(PutWorkflowRunPropertiesError::InvalidInput( + err.msg, + )) + } + "OperationTimeoutException" => { + return RusotoError::Service(PutWorkflowRunPropertiesError::OperationTimeout( + err.msg, + )) + } + "ResourceNumberLimitExceededException" => { + return RusotoError::Service( + PutWorkflowRunPropertiesError::ResourceNumberLimitExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for PutWorkflowRunPropertiesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for PutWorkflowRunPropertiesError { + fn description(&self) -> &str { + match *self { + PutWorkflowRunPropertiesError::AlreadyExists(ref cause) => cause, + PutWorkflowRunPropertiesError::ConcurrentModification(ref cause) => cause, + PutWorkflowRunPropertiesError::EntityNotFound(ref cause) => cause, + PutWorkflowRunPropertiesError::InternalService(ref cause) => cause, + PutWorkflowRunPropertiesError::InvalidInput(ref cause) => cause, + PutWorkflowRunPropertiesError::OperationTimeout(ref cause) => cause, + PutWorkflowRunPropertiesError::ResourceNumberLimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by ResetJobBookmark #[derive(Debug, PartialEq)] pub enum ResetJobBookmarkError { @@ -8458,9 +10888,54 @@ impl Error for ResetJobBookmarkError { } } } -/// Errors returned by StartCrawler +/// Errors returned by SearchTables #[derive(Debug, PartialEq)] -pub enum StartCrawlerError { +pub enum SearchTablesError { + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl SearchTablesError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InternalServiceException" => { + return RusotoError::Service(SearchTablesError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(SearchTablesError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(SearchTablesError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for SearchTablesError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for SearchTablesError { + fn description(&self) -> &str { + match *self { + SearchTablesError::InternalService(ref cause) => cause, + SearchTablesError::InvalidInput(ref cause) => cause, + SearchTablesError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by StartCrawler +#[derive(Debug, PartialEq)] +pub enum StartCrawlerError { ///

The operation cannot be performed because the crawler is already running.

CrawlerRunning(String), ///

A specified entity does not exist.

@@ -8566,6 +11041,132 @@ impl Error for StartCrawlerScheduleError { } } } +/// Errors returned by StartExportLabelsTaskRun +#[derive(Debug, PartialEq)] +pub enum StartExportLabelsTaskRunError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl StartExportLabelsTaskRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(StartExportLabelsTaskRunError::EntityNotFound( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(StartExportLabelsTaskRunError::InternalService( + err.msg, + )) + } + "InvalidInputException" => { + return RusotoError::Service(StartExportLabelsTaskRunError::InvalidInput( + err.msg, + )) + } + "OperationTimeoutException" => { + return RusotoError::Service(StartExportLabelsTaskRunError::OperationTimeout( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartExportLabelsTaskRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartExportLabelsTaskRunError { + fn description(&self) -> &str { + match *self { + StartExportLabelsTaskRunError::EntityNotFound(ref cause) => cause, + StartExportLabelsTaskRunError::InternalService(ref cause) => cause, + StartExportLabelsTaskRunError::InvalidInput(ref cause) => cause, + StartExportLabelsTaskRunError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by StartImportLabelsTaskRun +#[derive(Debug, PartialEq)] +pub enum StartImportLabelsTaskRunError { + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), + ///

A resource numerical limit was exceeded.

+ ResourceNumberLimitExceeded(String), +} + +impl StartImportLabelsTaskRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "EntityNotFoundException" => { + return RusotoError::Service(StartImportLabelsTaskRunError::EntityNotFound( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(StartImportLabelsTaskRunError::InternalService( + err.msg, + )) + } + "InvalidInputException" => { + return RusotoError::Service(StartImportLabelsTaskRunError::InvalidInput( + err.msg, + )) + } + "OperationTimeoutException" => { + return RusotoError::Service(StartImportLabelsTaskRunError::OperationTimeout( + err.msg, + )) + } + "ResourceNumberLimitExceededException" => { + return RusotoError::Service( + StartImportLabelsTaskRunError::ResourceNumberLimitExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartImportLabelsTaskRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartImportLabelsTaskRunError { + fn description(&self) -> &str { + match *self { + StartImportLabelsTaskRunError::EntityNotFound(ref cause) => cause, + StartImportLabelsTaskRunError::InternalService(ref cause) => cause, + StartImportLabelsTaskRunError::InvalidInput(ref cause) => cause, + StartImportLabelsTaskRunError::OperationTimeout(ref cause) => cause, + StartImportLabelsTaskRunError::ResourceNumberLimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by StartJobRun #[derive(Debug, PartialEq)] pub enum StartJobRunError { @@ -8631,6 +11232,150 @@ impl Error for StartJobRunError { } } } +/// Errors returned by StartMLEvaluationTaskRun +#[derive(Debug, PartialEq)] +pub enum StartMLEvaluationTaskRunError { + ///

Too many jobs are being run concurrently.

+ ConcurrentRunsExceeded(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The machine learning transform is not ready to run.

+ MLTransformNotReady(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl StartMLEvaluationTaskRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentRunsExceededException" => { + return RusotoError::Service( + StartMLEvaluationTaskRunError::ConcurrentRunsExceeded(err.msg), + ) + } + "EntityNotFoundException" => { + return RusotoError::Service(StartMLEvaluationTaskRunError::EntityNotFound( + err.msg, + )) + } + "InternalServiceException" => { + return RusotoError::Service(StartMLEvaluationTaskRunError::InternalService( + err.msg, + )) + } + "InvalidInputException" => { + return RusotoError::Service(StartMLEvaluationTaskRunError::InvalidInput( + err.msg, + )) + } + "MLTransformNotReadyException" => { + return RusotoError::Service( + StartMLEvaluationTaskRunError::MLTransformNotReady(err.msg), + ) + } + "OperationTimeoutException" => { + return RusotoError::Service(StartMLEvaluationTaskRunError::OperationTimeout( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartMLEvaluationTaskRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartMLEvaluationTaskRunError { + fn description(&self) -> &str { + match *self { + StartMLEvaluationTaskRunError::ConcurrentRunsExceeded(ref cause) => cause, + StartMLEvaluationTaskRunError::EntityNotFound(ref cause) => cause, + StartMLEvaluationTaskRunError::InternalService(ref cause) => cause, + StartMLEvaluationTaskRunError::InvalidInput(ref cause) => cause, + StartMLEvaluationTaskRunError::MLTransformNotReady(ref cause) => cause, + StartMLEvaluationTaskRunError::OperationTimeout(ref cause) => cause, + } + } +} +/// Errors returned by StartMLLabelingSetGenerationTaskRun +#[derive(Debug, PartialEq)] +pub enum StartMLLabelingSetGenerationTaskRunError { + ///

Too many jobs are being run concurrently.

+ ConcurrentRunsExceeded(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl StartMLLabelingSetGenerationTaskRunError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentRunsExceededException" => { + return RusotoError::Service( + StartMLLabelingSetGenerationTaskRunError::ConcurrentRunsExceeded(err.msg), + ) + } + "EntityNotFoundException" => { + return RusotoError::Service( + StartMLLabelingSetGenerationTaskRunError::EntityNotFound(err.msg), + ) + } + "InternalServiceException" => { + return RusotoError::Service( + StartMLLabelingSetGenerationTaskRunError::InternalService(err.msg), + ) + } + "InvalidInputException" => { + return RusotoError::Service( + StartMLLabelingSetGenerationTaskRunError::InvalidInput(err.msg), + ) + } + "OperationTimeoutException" => { + return RusotoError::Service( + StartMLLabelingSetGenerationTaskRunError::OperationTimeout(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartMLLabelingSetGenerationTaskRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartMLLabelingSetGenerationTaskRunError { + fn description(&self) -> &str { + match *self { + StartMLLabelingSetGenerationTaskRunError::ConcurrentRunsExceeded(ref cause) => cause, + StartMLLabelingSetGenerationTaskRunError::EntityNotFound(ref cause) => cause, + StartMLLabelingSetGenerationTaskRunError::InternalService(ref cause) => cause, + StartMLLabelingSetGenerationTaskRunError::InvalidInput(ref cause) => cause, + StartMLLabelingSetGenerationTaskRunError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by StartTrigger #[derive(Debug, PartialEq)] pub enum StartTriggerError { @@ -8696,6 +11441,73 @@ impl Error for StartTriggerError { } } } +/// Errors returned by StartWorkflowRun +#[derive(Debug, PartialEq)] +pub enum StartWorkflowRunError { + ///

Too many jobs are being run concurrently.

+ ConcurrentRunsExceeded(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), + ///

A resource numerical limit was exceeded.

+ ResourceNumberLimitExceeded(String), +} + +impl StartWorkflowRunError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentRunsExceededException" => { + return RusotoError::Service(StartWorkflowRunError::ConcurrentRunsExceeded( + err.msg, + )) + } + "EntityNotFoundException" => { + return RusotoError::Service(StartWorkflowRunError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(StartWorkflowRunError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(StartWorkflowRunError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(StartWorkflowRunError::OperationTimeout(err.msg)) + } + "ResourceNumberLimitExceededException" => { + return RusotoError::Service( + StartWorkflowRunError::ResourceNumberLimitExceeded(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartWorkflowRunError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartWorkflowRunError { + fn description(&self) -> &str { + match *self { + StartWorkflowRunError::ConcurrentRunsExceeded(ref cause) => cause, + StartWorkflowRunError::EntityNotFound(ref cause) => cause, + StartWorkflowRunError::InternalService(ref cause) => cause, + StartWorkflowRunError::InvalidInput(ref cause) => cause, + StartWorkflowRunError::OperationTimeout(ref cause) => cause, + StartWorkflowRunError::ResourceNumberLimitExceeded(ref cause) => cause, + } + } +} /// Errors returned by StopCrawler #[derive(Debug, PartialEq)] pub enum StopCrawlerError { @@ -9352,6 +12164,63 @@ impl Error for UpdateJobError { } } } +/// Errors returned by UpdateMLTransform +#[derive(Debug, PartialEq)] +pub enum UpdateMLTransformError { + ///

Access to a resource was denied.

+ AccessDenied(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl UpdateMLTransformError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "AccessDeniedException" => { + return RusotoError::Service(UpdateMLTransformError::AccessDenied(err.msg)) + } + "EntityNotFoundException" => { + return RusotoError::Service(UpdateMLTransformError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(UpdateMLTransformError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(UpdateMLTransformError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(UpdateMLTransformError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateMLTransformError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateMLTransformError { + fn description(&self) -> &str { + match *self { + UpdateMLTransformError::AccessDenied(ref cause) => cause, + UpdateMLTransformError::EntityNotFound(ref cause) => cause, + UpdateMLTransformError::InternalService(ref cause) => cause, + UpdateMLTransformError::InvalidInput(ref cause) => cause, + UpdateMLTransformError::OperationTimeout(ref cause) => cause, + } + } +} /// Errors returned by UpdatePartition #[derive(Debug, PartialEq)] pub enum UpdatePartitionError { @@ -9606,6 +12475,65 @@ impl Error for UpdateUserDefinedFunctionError { } } } +/// Errors returned by UpdateWorkflow +#[derive(Debug, PartialEq)] +pub enum UpdateWorkflowError { + ///

Two processes are trying to modify a resource simultaneously.

+ ConcurrentModification(String), + ///

A specified entity does not exist.

+ EntityNotFound(String), + ///

An internal service error occurred.

+ InternalService(String), + ///

The input provided was not valid.

+ InvalidInput(String), + ///

The operation timed out.

+ OperationTimeout(String), +} + +impl UpdateWorkflowError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ConcurrentModificationException" => { + return RusotoError::Service(UpdateWorkflowError::ConcurrentModification( + err.msg, + )) + } + "EntityNotFoundException" => { + return RusotoError::Service(UpdateWorkflowError::EntityNotFound(err.msg)) + } + "InternalServiceException" => { + return RusotoError::Service(UpdateWorkflowError::InternalService(err.msg)) + } + "InvalidInputException" => { + return RusotoError::Service(UpdateWorkflowError::InvalidInput(err.msg)) + } + "OperationTimeoutException" => { + return RusotoError::Service(UpdateWorkflowError::OperationTimeout(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateWorkflowError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateWorkflowError { + fn description(&self) -> &str { + match *self { + UpdateWorkflowError::ConcurrentModification(ref cause) => cause, + UpdateWorkflowError::EntityNotFound(ref cause) => cause, + UpdateWorkflowError::InternalService(ref cause) => cause, + UpdateWorkflowError::InvalidInput(ref cause) => cause, + UpdateWorkflowError::OperationTimeout(ref cause) => cause, + } + } +} /// Trait representing the capabilities of the AWS Glue API. AWS Glue clients implement this trait. pub trait Glue { ///

Creates one or more partitions in a batch operation.

@@ -9626,7 +12554,7 @@ pub trait Glue { input: BatchDeletePartitionRequest, ) -> RusotoFuture; - ///

Deletes multiple tables at once.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

+ ///

Deletes multiple tables at once.

After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

fn batch_delete_table( &self, input: BatchDeleteTableRequest, @@ -9644,7 +12572,7 @@ pub trait Glue { input: BatchGetCrawlersRequest, ) -> RusotoFuture; - ///

Returns a list of resource metadata for a given list of DevEndpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.

+ ///

Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

fn batch_get_dev_endpoints( &self, input: BatchGetDevEndpointsRequest, @@ -9668,12 +12596,24 @@ pub trait Glue { input: BatchGetTriggersRequest, ) -> RusotoFuture; + ///

Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

+ fn batch_get_workflows( + &self, + input: BatchGetWorkflowsRequest, + ) -> RusotoFuture; + ///

Stops one or more job runs for a specified job definition.

fn batch_stop_job_run( &self, input: BatchStopJobRunRequest, ) -> RusotoFuture; + ///

Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run's parent transform's TransformID and the task run's TaskRunId.

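A hedged sketch of issuing that cancellation, assuming the generated request struct takes exactly the two identifiers named above; both ids are supplied by the caller:

```rust
use rusoto_core::Region;
use rusoto_glue::{CancelMLTaskRunRequest, Glue, GlueClient};

fn cancel_run(transform_id: &str, task_run_id: &str) {
    let client = GlueClient::new(Region::UsEast1);
    let request = CancelMLTaskRunRequest {
        transform_id: transform_id.to_owned(), // the parent transform's TransformId
        task_run_id: task_run_id.to_owned(),   // the run to stop
    };
    match client.cancel_ml_task_run(request).sync() {
        // The response echoes the identifiers along with the run's new status.
        Ok(response) => println!("task run status: {:?}", response.status),
        Err(e) => eprintln!("cancel failed: {}", e),
    }
}
```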
+ fn cancel_ml_task_run( + &self, + input: CancelMLTaskRunRequest, + ) -> RusotoFuture; + ///

Creates a classifier in the user's account. This can be a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field of the request is present.

fn create_classifier( &self, @@ -9698,7 +12638,7 @@ pub trait Glue { input: CreateDatabaseRequest, ) -> RusotoFuture; - ///

Creates a new DevEndpoint.

+ ///

Creates a new development endpoint.

fn create_dev_endpoint( &self, input: CreateDevEndpointRequest, @@ -9710,6 +12650,12 @@ pub trait Glue { input: CreateJobRequest, ) -> RusotoFuture; + ///

Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.

Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description, in addition to the parameters that you want to use for your algorithm.

You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role, and optionally, AllocatedCapacity, Timeout, and MaxRetries. For more information, see Jobs.

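A sketch of a minimal FindMatches transform creation, under the assumption that the generated shapes are named `CreateMLTransformRequest`, `TransformParameters`, `FindMatchesParameters`, and `GlueTable` in this era of the crate; the transform name, role ARN, database, table, and key column are all hypothetical:

```rust
use rusoto_core::Region;
use rusoto_glue::{
    CreateMLTransformRequest, FindMatchesParameters, Glue, GlueClient, GlueTable,
    TransformParameters,
};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let request = CreateMLTransformRequest {
        name: "dedupe-customers".to_owned(), // hypothetical transform name
        role: "arn:aws:iam::123456789012:role/GlueMLRole".to_owned(), // hypothetical role
        input_record_tables: vec![GlueTable {
            database_name: "crm".to_owned(),    // hypothetical database
            table_name: "customers".to_owned(), // hypothetical table
            ..Default::default()
        }],
        parameters: TransformParameters {
            transform_type: "FIND_MATCHES".to_owned(),
            find_matches_parameters: Some(FindMatchesParameters {
                primary_key_column_name: Some("customer_id".to_owned()), // hypothetical key
                ..Default::default()
            }),
        },
        ..Default::default() // Description, Timeout, MaxRetries, etc. left unset
    };
    match client.create_ml_transform(request).sync() {
        Ok(response) => println!("created transform: {:?}", response.transform_id),
        Err(e) => eprintln!("create failed: {}", e),
    }
}
```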
+ fn create_ml_transform( + &self, + input: CreateMLTransformRequest, + ) -> RusotoFuture; + ///

Creates a new partition.

fn create_partition( &self, @@ -9722,7 +12668,7 @@ pub trait Glue { input: CreateScriptRequest, ) -> RusotoFuture; - ///

Creates a new security configuration.

+ ///

Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints.

fn create_security_configuration( &self, input: CreateSecurityConfigurationRequest, @@ -9746,6 +12692,12 @@ pub trait Glue { input: CreateUserDefinedFunctionRequest, ) -> RusotoFuture; + ///

Creates a new workflow.

+ fn create_workflow( + &self, + input: CreateWorkflowRequest, + ) -> RusotoFuture; + ///

Removes a classifier from the Data Catalog.

fn delete_classifier( &self, @@ -9764,13 +12716,13 @@ pub trait Glue { input: DeleteCrawlerRequest, ) -> RusotoFuture; - ///

Removes a specified Database from a Data Catalog.

After completing this operation, you will no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

+ ///

Removes a specified database from a Data Catalog.

After completing this operation, you no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

fn delete_database( &self, input: DeleteDatabaseRequest, ) -> RusotoFuture; - ///

Deletes a specified DevEndpoint.

+ ///

Deletes a specified development endpoint.

fn delete_dev_endpoint( &self, input: DeleteDevEndpointRequest, @@ -9782,6 +12734,12 @@ pub trait Glue { input: DeleteJobRequest, ) -> RusotoFuture; + ///

Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransform. However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.

+ fn delete_ml_transform( + &self, + input: DeleteMLTransformRequest, + ) -> RusotoFuture; + ///

Deletes a specified partition.

fn delete_partition( &self, @@ -9800,7 +12758,7 @@ pub trait Glue { input: DeleteSecurityConfigurationRequest, ) -> RusotoFuture; - ///

Removes a table definition from the Data Catalog.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

+ ///

Removes a table definition from the Data Catalog.

After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

fn delete_table( &self, input: DeleteTableRequest, @@ -9824,6 +12782,12 @@ pub trait Glue { input: DeleteUserDefinedFunctionRequest, ) -> RusotoFuture; + ///

Deletes a workflow.

+ fn delete_workflow( + &self, + input: DeleteWorkflowRequest, + ) -> RusotoFuture; + ///

Retrieves the status of a migration operation.

fn get_catalog_import_status( &self, @@ -9884,7 +12848,7 @@ pub trait Glue { input: GetDatabaseRequest, ) -> RusotoFuture; - ///

Retrieves all Databases defined in a given Data Catalog.

+ ///

Retrieves all databases defined in a given Data Catalog.

fn get_databases( &self, input: GetDatabasesRequest, @@ -9896,13 +12860,13 @@ pub trait Glue { input: GetDataflowGraphRequest, ) -> RusotoFuture; - ///

Retrieves information about a specified DevEndpoint.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address, and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

+ ///

Retrieves information about a specified development endpoint.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address, and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

fn get_dev_endpoint( &self, input: GetDevEndpointRequest, ) -> RusotoFuture; - ///

Retrieves all the DevEndpoints in this AWS account.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

+ ///

Retrieves all the development endpoints in this AWS account.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

fn get_dev_endpoints( &self, input: GetDevEndpointsRequest, @@ -9911,6 +12875,12 @@ pub trait Glue { ///

Retrieves an existing job definition.

fn get_job(&self, input: GetJobRequest) -> RusotoFuture; + ///

Returns information on a job bookmark entry.

+ fn get_job_bookmark( + &self, + input: GetJobBookmarkRequest, + ) -> RusotoFuture; + ///

Retrieves the metadata for a given job run.

fn get_job_run( &self, @@ -9926,6 +12896,30 @@ pub trait Glue { ///

Retrieves all current job definitions.

fn get_jobs(&self, input: GetJobsRequest) -> RusotoFuture; + ///

Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the status of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform's TransformID.

+ fn get_ml_task_run( + &self, + input: GetMLTaskRunRequest, + ) -> RusotoFuture; + + ///

Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform's TransformID and other optional parameters as documented in this section.

This operation returns a list of historic runs and must be paginated.

+ fn get_ml_task_runs( + &self, + input: GetMLTaskRunsRequest, + ) -> RusotoFuture; + + ///

Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform.

+ fn get_ml_transform( + &self, + input: GetMLTransformRequest, + ) -> RusotoFuture; + + ///

Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms.

+ fn get_ml_transforms( + &self, + input: GetMLTransformsRequest, + ) -> RusotoFuture; + ///

Creates mappings.

fn get_mapping( &self, @@ -10006,13 +13000,37 @@ pub trait Glue { input: GetUserDefinedFunctionRequest, ) -> RusotoFuture; - ///

Retrieves a multiple function definitions from the Data Catalog.

+ ///

Retrieves multiple function definitions from the Data Catalog.

fn get_user_defined_functions( &self, input: GetUserDefinedFunctionsRequest, ) -> RusotoFuture; - ///

Imports an existing Athena Data Catalog to AWS Glue

+ ///

Retrieves resource metadata for a workflow.

+ fn get_workflow( + &self, + input: GetWorkflowRequest, + ) -> RusotoFuture; + + ///

Retrieves the metadata for a given workflow run.

+ fn get_workflow_run( + &self, + input: GetWorkflowRunRequest, + ) -> RusotoFuture; + + ///

Retrieves the workflow run properties which were set during the run.

+ fn get_workflow_run_properties( + &self, + input: GetWorkflowRunPropertiesRequest, + ) -> RusotoFuture; + + ///

Retrieves metadata for all runs of a given workflow.

+ fn get_workflow_runs( + &self, + input: GetWorkflowRunsRequest, + ) -> RusotoFuture; + + ///

Imports an existing Amazon Athena Data Catalog to AWS Glue.

fn import_catalog_to_glue( &self, input: ImportCatalogToGlueRequest, @@ -10039,6 +13057,12 @@ pub trait Glue { input: ListTriggersRequest, ) -> RusotoFuture; + ///

Lists names of workflows created in the account.

+ fn list_workflows( + &self, + input: ListWorkflowsRequest, + ) -> RusotoFuture; + ///

Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.

fn put_data_catalog_encryption_settings( &self, @@ -10051,12 +13075,24 @@ pub trait Glue { input: PutResourcePolicyRequest, ) -> RusotoFuture; + ///

Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, its value is overwritten; otherwise, the property is added to the run's existing properties.

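A short sketch of the override-or-add semantics; the workflow name and run id are supplied by the caller and the property key/value pair is hypothetical:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, PutWorkflowRunPropertiesRequest};

fn tag_run(workflow_name: &str, run_id: &str) {
    let client = GlueClient::new(Region::UsEast1);
    let mut run_properties = HashMap::new();
    // An existing key is overwritten; a new key is added to the run's properties.
    run_properties.insert("stage".to_owned(), "validated".to_owned()); // hypothetical
    let request = PutWorkflowRunPropertiesRequest {
        name: workflow_name.to_owned(),
        run_id: run_id.to_owned(),
        run_properties,
    };
    if let Err(e) = client.put_workflow_run_properties(request).sync() {
        eprintln!("put properties failed: {}", e);
    }
}
```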
+ fn put_workflow_run_properties( + &self, + input: PutWorkflowRunPropertiesRequest, + ) -> RusotoFuture; + ///

Resets a bookmark entry.

fn reset_job_bookmark( &self, input: ResetJobBookmarkRequest, ) -> RusotoFuture; + ///

Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.

You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to a table for it to be returned. If you do not have access to all the columns in a table, those columns are not searched against when the list of tables is returned to you. If you have access to the columns but not the data in the columns, those columns and their associated metadata are still included in the search.

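A minimal sketch of a free-text search, assuming the mostly optional request fields generated for this operation; the search string is hypothetical:

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, SearchTablesRequest};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let request = SearchTablesRequest {
        search_text: Some("orders".to_owned()), // hypothetical free-text query
        ..Default::default()                    // no filters, default page size
    };
    match client.search_tables(request).sync() {
        // Only tables readable under the caller's Lake Formation policy come back.
        Ok(response) => {
            for table in response.table_list.unwrap_or_default() {
                println!("{}", table.name);
            }
        }
        Err(e) => eprintln!("search failed: {}", e),
    }
}
```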
+ fn search_tables( + &self, + input: SearchTablesRequest, + ) -> RusotoFuture; + ///

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException.

fn start_crawler( &self, @@ -10069,18 +13105,51 @@ pub trait Glue { input: StartCrawlerScheduleRequest, ) -> RusotoFuture; + ///

Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId. You can check on the status of your task run by calling the GetMLTaskRun API.

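A sketch of that export flow, assuming the generated request takes the transform id and the destination path; the S3 bucket path is hypothetical:

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, StartExportLabelsTaskRunRequest};

fn export_labels(transform_id: &str) {
    let client = GlueClient::new(Region::UsEast1);
    let request = StartExportLabelsTaskRunRequest {
        transform_id: transform_id.to_owned(),
        output_s3_path: "s3://my-label-bucket/exports/".to_owned(), // hypothetical bucket
    };
    match client.start_export_labels_task_run(request).sync() {
        // The export runs asynchronously; track it with GetMLTaskRun using this id.
        Ok(response) => println!("task run id: {:?}", response.task_run_id),
        Err(e) => eprintln!("export failed: {}", e),
    }
}
```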
+ fn start_export_labels_task_run( + &self, + input: StartExportLabelsTaskRunRequest, + ) -> RusotoFuture; + + ///

Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.

After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in machine learning workflows.) In the case of the FindMatches transform, these questions are of the form, “What is the correct way to group these rows together into groups composed entirely of matching records?” After the labeling process is finished, users upload their answers (labels) with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.

By default, StartImportLabelsTaskRun continually learns from and combines all labels that you upload unless you set Replace to true. If you set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.

You can check on the status of your task run by calling the GetMLTaskRun operation.

+    fn start_import_labels_task_run(
+        &self,
+        input: StartImportLabelsTaskRunRequest,
+    ) -> RusotoFuture<StartImportLabelsTaskRunResponse, StartImportLabelsTaskRunError>;
+
+    ///

Starts a job run using a job definition.

     fn start_job_run(
         &self,
         input: StartJobRunRequest,
     ) -> RusotoFuture<StartJobRunResponse, StartJobRunError>;
+
+    ///

Starts a task to estimate the quality of the transform.

When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality.

Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the status of the EvaluationTaskRun.

+    fn start_ml_evaluation_task_run(
+        &self,
+        input: StartMLEvaluationTaskRunRequest,
+    ) -> RusotoFuture<StartMLEvaluationTaskRunResponse, StartMLEvaluationTaskRunError>;
+
+    ///

Starts the active learning workflow for your machine learning transform to improve the transform's quality by generating label sets and adding labels.

When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer.

In the case of the FindMatches transform, these questions are of the form, “What is the correct way to group these rows together into groups composed entirely of matching records?”

After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.

+    fn start_ml_labeling_set_generation_task_run(
+        &self,
+        input: StartMLLabelingSetGenerationTaskRunRequest,
+    ) -> RusotoFuture<
+        StartMLLabelingSetGenerationTaskRunResponse,
+        StartMLLabelingSetGenerationTaskRunError,
+    >;
+
+    ///

Starts an existing trigger. See Triggering Jobs for information about how different types of trigger are started.

     fn start_trigger(
         &self,
         input: StartTriggerRequest,
     ) -> RusotoFuture<StartTriggerResponse, StartTriggerError>;
+
+    ///

Starts a new run of the specified workflow.

+    fn start_workflow_run(
+        &self,
+        input: StartWorkflowRunRequest,
+    ) -> RusotoFuture<StartWorkflowRunResponse, StartWorkflowRunError>;
+
+    ///

If the specified crawler is running, stops the crawl.

     fn stop_crawler(
         &self,
@@ -10141,7 +13210,7 @@ pub trait Glue {
         input: UpdateDatabaseRequest,
     ) -> RusotoFuture<UpdateDatabaseResponse, UpdateDatabaseError>;

-    ///

Updates a specified DevEndpoint.

+ ///

Updates a specified development endpoint.

     fn update_dev_endpoint(
         &self,
         input: UpdateDevEndpointRequest,
@@ -10153,6 +13222,12 @@ pub trait Glue {
         input: UpdateJobRequest,
     ) -> RusotoFuture<UpdateJobResponse, UpdateJobError>;
+
+    ///

Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.

After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).

+    fn update_ml_transform(
+        &self,
+        input: UpdateMLTransformRequest,
+    ) -> RusotoFuture<UpdateMLTransformResponse, UpdateMLTransformError>;
+
+    ///

Updates a partition.

     fn update_partition(
         &self,
@@ -10176,6 +13251,12 @@ pub trait Glue {
         &self,
         input: UpdateUserDefinedFunctionRequest,
     ) -> RusotoFuture<UpdateUserDefinedFunctionResponse, UpdateUserDefinedFunctionError>;
+
+    ///

Updates an existing workflow.

+    fn update_workflow(
+        &self,
+        input: UpdateWorkflowRequest,
+    ) -> RusotoFuture<UpdateWorkflowResponse, UpdateWorkflowError>;
 }

 /// A client for the AWS Glue API.
 #[derive(Clone)]
@@ -10189,10 +13270,7 @@ impl GlueClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> GlueClient {
-        GlueClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with<P, D>(
@@ -10206,10 +13284,14 @@ impl GlueClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        GlueClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> GlueClient {
+        GlueClient { client, region }
     }
 }

@@ -10298,7 +13380,7 @@ impl Glue for GlueClient {
         })
     }
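The refactor above routes both existing constructors through the new `new_with_client`, which also becomes public API. A minimal sketch of the resulting construction options (region choice is arbitrary):

```rust
use rusoto_core::{Client, Region};
use rusoto_glue::GlueClient;

fn main() {
    // Default credentials provider and TLS client:
    let _glue = GlueClient::new(Region::UsWest2);

    // Reuse one shared `Client` (connection pool, credentials) across
    // several service clients via the new constructor:
    let shared = Client::shared();
    let _glue_shared = GlueClient::new_with_client(shared, Region::UsWest2);
}
```

-    ///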

Deletes multiple tables at once.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

+ ///

Deletes multiple tables at once.

After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

fn batch_delete_table( &self, input: BatchDeleteTableRequest, @@ -10382,7 +13464,7 @@ impl Glue for GlueClient { }) } - ///

Returns a list of resource metadata for a given list of DevEndpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.

+ ///

Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

fn batch_get_dev_endpoints( &self, input: BatchGetDevEndpointsRequest, @@ -10497,6 +13579,35 @@ impl Glue for GlueClient { }) } + ///

Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

+    fn batch_get_workflows(
+        &self,
+        input: BatchGetWorkflowsRequest,
+    ) -> RusotoFuture<BatchGetWorkflowsResponse, BatchGetWorkflowsError> {
+        let mut request = SignedRequest::new("POST", "glue", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSGlue.BatchGetWorkflows");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<BatchGetWorkflowsResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(BatchGetWorkflowsError::from_response(response))),
+                )
+            }
+        })
+    }
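A minimal sketch of the list-then-batch-get flow this doc comment describes (field names follow the generated request structs; the service expects a non-empty list of names):

```rust
use rusoto_core::Region;
use rusoto_glue::{BatchGetWorkflowsRequest, Glue, GlueClient, ListWorkflowsRequest};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    // First list workflow names, then fetch their metadata in bulk.
    let names = client
        .list_workflows(ListWorkflowsRequest::default())
        .sync()
        .expect("ListWorkflows failed")
        .workflows
        .unwrap_or_default();
    let details = client
        .batch_get_workflows(BatchGetWorkflowsRequest {
            names: Some(names),           // assumes at least one workflow exists
            include_graph: Some(false),
        })
        .sync()
        .expect("BatchGetWorkflows failed");
    println!("fetched {} workflows", details.workflows.map_or(0, |w| w.len()));
}
```

+
+    ///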

Stops one or more job runs for a specified job definition.

fn batch_stop_job_run( &self, @@ -10525,6 +13636,35 @@ impl Glue for GlueClient { }) } + ///

Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run's parent transform's TransformID and the task run's TaskRunId.

+ fn cancel_ml_task_run( + &self, + input: CancelMLTaskRunRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.CancelMLTaskRun"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(CancelMLTaskRunError::from_response(response))), + ) + } + }) + } + ///

Creates a classifier in the user's account. This can be a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field of the request is present.

fn create_classifier( &self, @@ -10641,7 +13781,7 @@ impl Glue for GlueClient { }) } - ///

Creates a new DevEndpoint.

+ ///

Creates a new development endpoint.

fn create_dev_endpoint( &self, input: CreateDevEndpointRequest, @@ -10699,6 +13839,35 @@ impl Glue for GlueClient { }) } + ///

Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.

Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description, in addition to the parameters that you want to use for your algorithm.

You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role, and optionally, AllocatedCapacity, Timeout, and MaxRetries. For more information, see Jobs.

+ fn create_ml_transform( + &self, + input: CreateMLTransformRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.CreateMLTransform"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(CreateMLTransformError::from_response(response))), + ) + } + }) + } + ///

Creates a new partition.

fn create_partition( &self, @@ -10757,7 +13926,7 @@ impl Glue for GlueClient { }) } - ///

Creates a new security configuration.

+ ///

Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints.

fn create_security_configuration( &self, input: CreateSecurityConfigurationRequest, @@ -10867,6 +14036,35 @@ impl Glue for GlueClient { }) } + ///

Creates a new workflow.

+ fn create_workflow( + &self, + input: CreateWorkflowRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.CreateWorkflow"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(CreateWorkflowError::from_response(response))), + ) + } + }) + } + ///

Removes a classifier from the Data Catalog.

fn delete_classifier( &self, @@ -10954,7 +14152,7 @@ impl Glue for GlueClient { }) } - ///

Removes a specified Database from a Data Catalog.

After completing this operation, you will no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

+ ///

Removes a specified database from a Data Catalog.

After completing this operation, you no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

fn delete_database( &self, input: DeleteDatabaseRequest, @@ -10983,7 +14181,7 @@ impl Glue for GlueClient { }) } - ///

Deletes a specified DevEndpoint.

+ ///

Deletes a specified development endpoint.

fn delete_dev_endpoint( &self, input: DeleteDevEndpointRequest, @@ -11006,21 +14204,50 @@ impl Glue for GlueClient { response .buffer() .from_err() - .and_then(|response| Err(DeleteDevEndpointError::from_response(response))), + .and_then(|response| Err(DeleteDevEndpointError::from_response(response))), + ) + } + }) + } + + ///

Deletes a specified job definition. If the job definition is not found, no exception is thrown.

+ fn delete_job( + &self, + input: DeleteJobRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.DeleteJob"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(DeleteJobError::from_response(response))), ) } }) } - ///

Deletes a specified job definition. If the job definition is not found, no exception is thrown.

- fn delete_job( + ///

Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransforms. However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.

+ fn delete_ml_transform( &self, - input: DeleteJobRequest, - ) -> RusotoFuture { + input: DeleteMLTransformRequest, + ) -> RusotoFuture { let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); - request.add_header("x-amz-target", "AWSGlue.DeleteJob"); + request.add_header("x-amz-target", "AWSGlue.DeleteMLTransform"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded)); @@ -11028,14 +14255,14 @@ impl Glue for GlueClient { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { proto::json::ResponsePayload::new(&response) - .deserialize::() + .deserialize::() })) } else { Box::new( response .buffer() .from_err() - .and_then(|response| Err(DeleteJobError::from_response(response))), + .and_then(|response| Err(DeleteMLTransformError::from_response(response))), ) } }) @@ -11124,7 +14351,7 @@ impl Glue for GlueClient { }) } - ///

Removes a table definition from the Data Catalog.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

+ ///

Removes a table definition from the Data Catalog.

After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

fn delete_table( &self, input: DeleteTableRequest, @@ -11237,6 +14464,35 @@ impl Glue for GlueClient { }) } + ///

Deletes a workflow.

+ fn delete_workflow( + &self, + input: DeleteWorkflowRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.DeleteWorkflow"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(DeleteWorkflowError::from_response(response))), + ) + } + }) + } + ///

Retrieves the status of a migration operation.

fn get_catalog_import_status( &self, @@ -11526,7 +14782,7 @@ impl Glue for GlueClient { }) } - ///

Retrieves all Databases defined in a given Data Catalog.

+ ///

Retrieves all databases defined in a given Data Catalog.

fn get_databases( &self, input: GetDatabasesRequest, @@ -11584,7 +14840,7 @@ impl Glue for GlueClient { }) } - ///

Retrieves information about a specified DevEndpoint.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address, and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

+ ///

Retrieves information about a specified development endpoint.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address, and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

fn get_dev_endpoint( &self, input: GetDevEndpointRequest, @@ -11613,7 +14869,7 @@ impl Glue for GlueClient { }) } - ///

Retrieves all the DevEndpoints in this AWS account.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

+ ///

Retrieves all the development endpoints in this AWS account.

When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

fn get_dev_endpoints( &self, input: GetDevEndpointsRequest, @@ -11667,6 +14923,35 @@ impl Glue for GlueClient { }) } + ///

Returns information on a job bookmark entry.

+ fn get_job_bookmark( + &self, + input: GetJobBookmarkRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetJobBookmark"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetJobBookmarkError::from_response(response))), + ) + } + }) + } + ///

Retrieves the metadata for a given job run.

fn get_job_run( &self, @@ -11750,6 +15035,122 @@ impl Glue for GlueClient { }) } + ///

Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the status of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform's TransformID.

+ fn get_ml_task_run( + &self, + input: GetMLTaskRunRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetMLTaskRun"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMLTaskRunError::from_response(response))), + ) + } + }) + } + + ///

Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform's TransformID and other optional parameters as documented in this section.

This operation returns a list of historic runs and must be paginated.

+ fn get_ml_task_runs( + &self, + input: GetMLTaskRunsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetMLTaskRuns"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMLTaskRunsError::from_response(response))), + ) + } + }) + } + + ///
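Since the doc comment notes this operation "must be paginated", a minimal sketch of a NextToken loop (the transform ID is a placeholder; field names follow the generated structs):

```rust
use rusoto_core::Region;
use rusoto_glue::{GetMLTaskRunsRequest, Glue, GlueClient};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let transform_id = "tfm-0123456789abcdef".to_string(); // placeholder
    let mut next_token: Option<String> = None;
    loop {
        let page = client
            .get_ml_task_runs(GetMLTaskRunsRequest {
                transform_id: transform_id.clone(),
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()
            .expect("GetMLTaskRuns failed");
        for run in page.task_runs.unwrap_or_default() {
            println!("task run {:?}: {:?}", run.task_run_id, run.status);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break; // no more pages
        }
    }
}
```

+
+    ///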

Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform.

+ fn get_ml_transform( + &self, + input: GetMLTransformRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetMLTransform"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMLTransformError::from_response(response))), + ) + } + }) + } + + ///

Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms.

+ fn get_ml_transforms( + &self, + input: GetMLTransformsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetMLTransforms"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetMLTransformsError::from_response(response))), + ) + } + }) + } + ///

Creates mappings.

fn get_mapping( &self, @@ -12069,25 +15470,137 @@ impl Glue for GlueClient { proto::json::ResponsePayload::new(&response).deserialize::() })) } else { - Box::new( - response - .buffer() - .from_err() - .and_then(|response| Err(GetTagsError::from_response(response))), - ) + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetTagsError::from_response(response))), + ) + } + }) + } + + ///

Retrieves the definition of a trigger.

+ fn get_trigger( + &self, + input: GetTriggerRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetTrigger"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetTriggerError::from_response(response))), + ) + } + }) + } + + ///

Gets all the triggers associated with a job.

+ fn get_triggers( + &self, + input: GetTriggersRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetTriggers"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetTriggersError::from_response(response))), + ) + } + }) + } + + ///

Retrieves a specified function definition from the Data Catalog.

+ fn get_user_defined_function( + &self, + input: GetUserDefinedFunctionRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetUserDefinedFunction"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(GetUserDefinedFunctionError::from_response(response)) + }), + ) + } + }) + } + + ///

Retrieves multiple function definitions from the Data Catalog.

+ fn get_user_defined_functions( + &self, + input: GetUserDefinedFunctionsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.GetUserDefinedFunctions"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(GetUserDefinedFunctionsError::from_response(response)) + })) } }) } - ///

Retrieves the definition of a trigger.

- fn get_trigger( + ///

Retrieves resource metadata for a workflow.

+ fn get_workflow( &self, - input: GetTriggerRequest, - ) -> RusotoFuture { + input: GetWorkflowRequest, + ) -> RusotoFuture { let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); - request.add_header("x-amz-target", "AWSGlue.GetTrigger"); + request.add_header("x-amz-target", "AWSGlue.GetWorkflow"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded)); @@ -12095,28 +15608,28 @@ impl Glue for GlueClient { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { proto::json::ResponsePayload::new(&response) - .deserialize::() + .deserialize::() })) } else { Box::new( response .buffer() .from_err() - .and_then(|response| Err(GetTriggerError::from_response(response))), + .and_then(|response| Err(GetWorkflowError::from_response(response))), ) } }) } - ///

Gets all the triggers associated with a job.

- fn get_triggers( + ///

Retrieves the metadata for a given workflow run.

+ fn get_workflow_run( &self, - input: GetTriggersRequest, - ) -> RusotoFuture { + input: GetWorkflowRunRequest, + ) -> RusotoFuture { let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); - request.add_header("x-amz-target", "AWSGlue.GetTriggers"); + request.add_header("x-amz-target", "AWSGlue.GetWorkflowRun"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded)); @@ -12124,28 +15637,28 @@ impl Glue for GlueClient { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { proto::json::ResponsePayload::new(&response) - .deserialize::() + .deserialize::() })) } else { Box::new( response .buffer() .from_err() - .and_then(|response| Err(GetTriggersError::from_response(response))), + .and_then(|response| Err(GetWorkflowRunError::from_response(response))), ) } }) } - ///

Retrieves a specified function definition from the Data Catalog.

- fn get_user_defined_function( + ///

Retrieves the workflow run properties which were set during the run.

+ fn get_workflow_run_properties( &self, - input: GetUserDefinedFunctionRequest, - ) -> RusotoFuture { + input: GetWorkflowRunPropertiesRequest, + ) -> RusotoFuture { let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); - request.add_header("x-amz-target", "AWSGlue.GetUserDefinedFunction"); + request.add_header("x-amz-target", "AWSGlue.GetWorkflowRunProperties"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded)); @@ -12153,27 +15666,25 @@ impl Glue for GlueClient { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { proto::json::ResponsePayload::new(&response) - .deserialize::() + .deserialize::() })) } else { - Box::new( - response.buffer().from_err().and_then(|response| { - Err(GetUserDefinedFunctionError::from_response(response)) - }), - ) + Box::new(response.buffer().from_err().and_then(|response| { + Err(GetWorkflowRunPropertiesError::from_response(response)) + })) } }) } - ///

Retrieves a multiple function definitions from the Data Catalog.

- fn get_user_defined_functions( + ///

Retrieves metadata for all runs of a given workflow.

+ fn get_workflow_runs( &self, - input: GetUserDefinedFunctionsRequest, - ) -> RusotoFuture { + input: GetWorkflowRunsRequest, + ) -> RusotoFuture { let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); - request.add_header("x-amz-target", "AWSGlue.GetUserDefinedFunctions"); + request.add_header("x-amz-target", "AWSGlue.GetWorkflowRuns"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded)); @@ -12181,17 +15692,20 @@ impl Glue for GlueClient { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { proto::json::ResponsePayload::new(&response) - .deserialize::() + .deserialize::() })) } else { - Box::new(response.buffer().from_err().and_then(|response| { - Err(GetUserDefinedFunctionsError::from_response(response)) - })) + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(GetWorkflowRunsError::from_response(response))), + ) } }) } - ///

Imports an existing Athena Data Catalog to AWS Glue

+ ///

Imports an existing Amazon Athena Data Catalog to AWS Glue

fn import_catalog_to_glue( &self, input: ImportCatalogToGlueRequest, @@ -12332,6 +15846,35 @@ impl Glue for GlueClient { }) } + ///

Lists names of workflows created in the account.

+ fn list_workflows( + &self, + input: ListWorkflowsRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.ListWorkflows"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(ListWorkflowsError::from_response(response))), + ) + } + }) + } + ///

Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.

fn put_data_catalog_encryption_settings( &self, @@ -12390,6 +15933,32 @@ impl Glue for GlueClient { }) } + ///

Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, its value is overridden; otherwise, the property is added to the existing properties.

+    fn put_workflow_run_properties(
+        &self,
+        input: PutWorkflowRunPropertiesRequest,
+    ) -> RusotoFuture<PutWorkflowRunPropertiesResponse, PutWorkflowRunPropertiesError> {
+        let mut request = SignedRequest::new("POST", "glue", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSGlue.PutWorkflowRunProperties");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<PutWorkflowRunPropertiesResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(PutWorkflowRunPropertiesError::from_response(response))
+                }))
+            }
+        })
+    }
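A minimal sketch of attaching a run property as described above (workflow name, run ID, and the property itself are placeholders):

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, PutWorkflowRunPropertiesRequest};
use std::collections::HashMap;

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let mut properties = HashMap::new();
    properties.insert("stage".to_string(), "validated".to_string()); // placeholder property
    let request = PutWorkflowRunPropertiesRequest {
        name: "my-workflow".to_string(),           // placeholder workflow name
        run_id: "wr_0123456789abcdef".to_string(), // placeholder run ID
        run_properties: properties,
    };
    if let Err(e) = client.put_workflow_run_properties(request).sync() {
        eprintln!("PutWorkflowRunProperties failed: {:?}", e);
    }
}
```

+
+    ///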

Resets a bookmark entry.

fn reset_job_bookmark( &self, @@ -12419,6 +15988,35 @@ impl Glue for GlueClient { }) } + ///

Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.

You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to the table for it to be returned. If you do not have access to all the columns in the table, these columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.

+    fn search_tables(
+        &self,
+        input: SearchTablesRequest,
+    ) -> RusotoFuture<SearchTablesResponse, SearchTablesError> {
+        let mut request = SignedRequest::new("POST", "glue", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSGlue.SearchTables");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<SearchTablesResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(SearchTablesError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException.

fn start_crawler( &self, @@ -12476,6 +16074,58 @@ impl Glue for GlueClient { }) } + ///

Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId. You can check on the status of your task run by calling the GetMLTaskRun API.

+ fn start_export_labels_task_run( + &self, + input: StartExportLabelsTaskRunRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.StartExportLabelsTaskRun"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(StartExportLabelsTaskRunError::from_response(response)) + })) + } + }) + } + + ///

Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.

After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in machine learning workflows.) In the case of the FindMatches transform, these questions are of the form, “What is the correct way to group these rows together into groups composed entirely of matching records?” After the labeling process is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.

By default, StartMLLabelingSetGenerationTaskRun continually learns from and combines all labels that you upload unless you set Replace to true. If you set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.

You can check on the status of your task run by calling the GetMLTaskRun operation.

+ fn start_import_labels_task_run( + &self, + input: StartImportLabelsTaskRunRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.StartImportLabelsTaskRun"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(StartImportLabelsTaskRunError::from_response(response)) + })) + } + }) + } + ///

Starts a job run using a job definition.

fn start_job_run( &self, @@ -12505,6 +16155,66 @@ impl Glue for GlueClient { }) } + ///

Starts a task to estimate the quality of the transform.

When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality.

Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the status of the EvaluationTaskRun.

+ fn start_ml_evaluation_task_run( + &self, + input: StartMLEvaluationTaskRunRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.StartMLEvaluationTaskRun"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(StartMLEvaluationTaskRunError::from_response(response)) + })) + } + }) + } + + ///

Starts the active learning workflow for your machine learning transform to improve the transform's quality by generating label sets and adding labels.

When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer.

In the case of the FindMatches transform, these questions are of the form, “What is the correct way to group these rows together into groups composed entirely of matching records?”

After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.

+ fn start_ml_labeling_set_generation_task_run( + &self, + input: StartMLLabelingSetGenerationTaskRunRequest, + ) -> RusotoFuture< + StartMLLabelingSetGenerationTaskRunResponse, + StartMLLabelingSetGenerationTaskRunError, + > { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWSGlue.StartMLLabelingSetGenerationTaskRun", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(StartMLLabelingSetGenerationTaskRunError::from_response( + response, + )) + })) + } + }) + } + ///

Starts an existing trigger. See Triggering Jobs for information about how different types of trigger are started.

fn start_trigger( &self, @@ -12534,6 +16244,35 @@ impl Glue for GlueClient { }) } + ///

Starts a new run of the specified workflow.

+    fn start_workflow_run(
+        &self,
+        input: StartWorkflowRunRequest,
+    ) -> RusotoFuture<StartWorkflowRunResponse, StartWorkflowRunError> {
+        let mut request = SignedRequest::new("POST", "glue", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSGlue.StartWorkflowRun");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<StartWorkflowRunResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(StartWorkflowRunError::from_response(response))),
+                )
+            }
+        })
+    }
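A minimal sketch of kicking off a run (the workflow name is a placeholder; the returned RunId identifies the new run):

```rust
use rusoto_core::Region;
use rusoto_glue::{Glue, GlueClient, StartWorkflowRunRequest};

fn main() {
    let client = GlueClient::new(Region::UsEast1);
    let request = StartWorkflowRunRequest {
        name: "my-workflow".to_string(), // placeholder workflow name
    };
    match client.start_workflow_run(request).sync() {
        Ok(resp) => println!("started workflow run: {:?}", resp.run_id),
        Err(e) => eprintln!("StartWorkflowRun failed: {:?}", e),
    }
}
```

+
+    ///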

If the specified crawler is running, stops the crawl.

fn stop_crawler( &self, @@ -12822,7 +16561,7 @@ impl Glue for GlueClient { }) } - ///

Updates a specified DevEndpoint.

+ ///

Updates a specified development endpoint.

fn update_dev_endpoint( &self, input: UpdateDevEndpointRequest, @@ -12880,6 +16619,35 @@ impl Glue for GlueClient { }) } + ///

Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.

After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).

+ fn update_ml_transform( + &self, + input: UpdateMLTransformRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "glue", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSGlue.UpdateMLTransform"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UpdateMLTransformError::from_response(response))), + ) + } + }) + } + ///

Updates a partition.

fn update_partition( &self, @@ -12992,4 +16760,33 @@ impl Glue for GlueClient { } }) } + + ///

Updates an existing workflow.

+    fn update_workflow(
+        &self,
+        input: UpdateWorkflowRequest,
+    ) -> RusotoFuture<UpdateWorkflowResponse, UpdateWorkflowError> {
+        let mut request = SignedRequest::new("POST", "glue", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "AWSGlue.UpdateWorkflow");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateWorkflowResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateWorkflowError::from_response(response))),
+                )
+            }
+        })
+    }
 }
diff --git a/rusoto/services/greengrass/Cargo.toml b/rusoto/services/greengrass/Cargo.toml
index da9982d9f96..570f9f77f89 100644
--- a/rusoto/services/greengrass/Cargo.toml
+++ b/rusoto/services/greengrass/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_greengrass"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/greengrass/README.md b/rusoto/services/greengrass/README.md
index 91e86b1e552..58950b01d4f 100644
--- a/rusoto/services/greengrass/README.md
+++ b/rusoto/services/greengrass/README.md
@@ -23,9 +23,16 @@ To use `rusoto_greengrass` in your application, add it as a dependency in your `Cargo.toml`:
 ```toml
 [dependencies]
-rusoto_greengrass = "0.40.0"
+rusoto_greengrass = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/greengrass/src/custom/mod.rs b/rusoto/services/greengrass/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/greengrass/src/custom/mod.rs
+++ b/rusoto/services/greengrass/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/greengrass/src/generated.rs b/rusoto/services/greengrass/src/generated.rs
index 461ac9321fc..7a9fcce2a8e 100644
--- a/rusoto/services/greengrass/src/generated.rs
+++ b/rusoto/services/greengrass/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -30,14 +29,13 @@
 pub struct AssociateRoleToGroupRequest {
     ///

The ID of the Greengrass group.

#[serde(rename = "GroupId")] pub group_id: String, - ///

The ARN of the role you wish to associate with this group.

+ ///

The ARN of the role you wish to associate with this group. The existence of the role is not validated.

#[serde(rename = "RoleArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub role_arn: Option, + pub role_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateRoleToGroupResponse { ///

The time, in milliseconds since the epoch, when the role ARN was associated with the group.

#[serde(rename = "AssociatedAt")] @@ -49,12 +47,11 @@ pub struct AssociateRoleToGroupResponse { pub struct AssociateServiceRoleToAccountRequest { ///

The ARN of the service role you wish to associate with your account.

#[serde(rename = "RoleArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub role_arn: Option, + pub role_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateServiceRoleToAccountResponse { ///

The time when the service role was associated with the account.

#[serde(rename = "AssociatedAt")] @@ -64,7 +61,7 @@ pub struct AssociateServiceRoleToAccountResponse { ///

Information about a bulk deployment. You cannot start a new bulk deployment while another one is still running or in a non-terminal state.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BulkDeployment { ///

The ARN of the bulk deployment.

#[serde(rename = "BulkDeploymentArn")] @@ -82,7 +79,7 @@ pub struct BulkDeployment { ///

Relevant metrics on input records processed during bulk deployment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BulkDeploymentMetrics { ///

The total number of records that returned a non-retryable error. For example, this can occur if a group record from the input file uses an invalid format or specifies a nonexistent group version, or if the execution role doesn't grant permission to deploy a group or group version.

#[serde(rename = "InvalidInputRecords")] @@ -100,7 +97,7 @@ pub struct BulkDeploymentMetrics { ///

Information about an individual group deployment in a bulk deployment operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BulkDeploymentResult { ///

The time, in ISO format, when the deployment was created.

#[serde(rename = "CreatedAt")] @@ -162,12 +159,10 @@ pub struct ConnectivityInfo { pub struct Connector { ///

The ARN of the connector.

#[serde(rename = "ConnectorArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub connector_arn: Option, + pub connector_arn: String, ///

A descriptive or arbitrary ID for the connector. This value must be unique within the connector definition version. Max length is 128 characters with pattern [a-zA-Z0-9:_-]+.

#[serde(rename = "Id")] - #[serde(skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: String, ///

The parameters or configuration that the connector uses.

#[serde(rename = "Parameters")] #[serde(skip_serializing_if = "Option::is_none")] @@ -188,20 +183,17 @@ pub struct ConnectorDefinitionVersion { pub struct Core { ///

The ARN of the certificate associated with the core.

#[serde(rename = "CertificateArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub certificate_arn: Option, + pub certificate_arn: String, ///

A descriptive or arbitrary ID for the core. This value must be unique within the core definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.

#[serde(rename = "Id")] - #[serde(skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: String, ///

If true, the core's local shadow is automatically synced with the cloud.

#[serde(rename = "SyncShadow")] #[serde(skip_serializing_if = "Option::is_none")] pub sync_shadow: Option, ///

The ARN of the thing which is the core.

#[serde(rename = "ThingArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub thing_arn: Option, + pub thing_arn: String, } ///

Information about a core definition version.

@@ -227,14 +219,14 @@ pub struct CreateConnectorDefinitionRequest { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, - ///

Tag(s) to add to the new resource

+ ///

Tag(s) to add to the new resource.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConnectorDefinitionResponse { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateConnectorDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -252,11 +244,11 @@ pub struct CreateConnectorDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -282,7 +274,7 @@ pub struct CreateConnectorDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateConnectorDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -317,14 +309,14 @@ pub struct CreateCoreDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateCoreDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -342,11 +334,11 @@ pub struct CreateCoreDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -372,7 +364,7 @@ pub struct CreateCoreDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateCoreDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -404,8 +396,7 @@ pub struct CreateDeploymentRequest {
    pub deployment_id: Option<String>,
    /// <p>The type of deployment. When used for ''CreateDeployment'', only ''NewDeployment'' and ''Redeployment'' are valid.</p>
    #[serde(rename = "DeploymentType")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub deployment_type: Option<String>,
+    pub deployment_type: String,
    /// <p>The ID of the Greengrass group.</p>
    #[serde(rename = "GroupId")]
    pub group_id: String,
@@ -416,7 +407,7 @@
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDeploymentResponse {
    /// <p>The ARN of the deployment.</p>
    #[serde(rename = "DeploymentArn")]
@@ -442,14 +433,14 @@ pub struct CreateDeviceDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDeviceDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -467,11 +458,11 @@ pub struct CreateDeviceDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -497,7 +488,7 @@ pub struct CreateDeviceDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDeviceDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -531,14 +522,14 @@ pub struct CreateFunctionDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateFunctionDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -556,11 +547,11 @@ pub struct CreateFunctionDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -591,7 +582,7 @@ pub struct CreateFunctionDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateFunctionDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -623,7 +614,7 @@ pub struct CreateGroupCertificateAuthorityRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateGroupCertificateAuthorityResponse {
    /// <p>The ARN of the group certificate authority.</p>
    #[serde(rename = "GroupCertificateAuthorityArn")]
@@ -645,14 +636,14 @@ pub struct CreateGroupRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateGroupResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -670,11 +661,11 @@ pub struct CreateGroupResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -724,7 +715,7 @@ pub struct CreateGroupVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateGroupVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -758,14 +749,14 @@ pub struct CreateLoggerDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateLoggerDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -783,11 +774,11 @@ pub struct CreateLoggerDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -813,7 +804,7 @@ pub struct CreateLoggerDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateLoggerDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -847,14 +838,14 @@ pub struct CreateResourceDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateResourceDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -872,11 +863,11 @@ pub struct CreateResourceDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -902,7 +893,7 @@ pub struct CreateResourceDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateResourceDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -929,27 +920,22 @@ pub struct CreateSoftwareUpdateJobRequest {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub amzn_client_token: Option<String>,
    #[serde(rename = "S3UrlSignerRole")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub s3_url_signer_role: Option<String>,
+    pub s3_url_signer_role: String,
    #[serde(rename = "SoftwareToUpdate")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub software_to_update: Option<String>,
+    pub software_to_update: String,
    #[serde(rename = "UpdateAgentLogLevel")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub update_agent_log_level: Option<String>,
    #[serde(rename = "UpdateTargets")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub update_targets: Option<Vec<String>>,
+    pub update_targets: Vec<String>,
    #[serde(rename = "UpdateTargetsArchitecture")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub update_targets_architecture: Option<String>,
+    pub update_targets_architecture: String,
    #[serde(rename = "UpdateTargetsOperatingSystem")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub update_targets_operating_system: Option<String>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateSoftwareUpdateJobResponse {
    /// <p>The IoT Job ARN corresponding to this update.</p>
    #[serde(rename = "IotJobArn")]
@@ -975,14 +961,14 @@ pub struct CreateSubscriptionDefinitionRequest {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>Tag(s) to add to the new resource</p>
+    /// <p>Tag(s) to add to the new resource.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateSubscriptionDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1000,11 +986,11 @@ pub struct CreateSubscriptionDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1030,7 +1016,7 @@ pub struct CreateSubscriptionDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateSubscriptionDefinitionVersionResponse {
    /// <p>The ARN of the version.</p>
    #[serde(rename = "Arn")]
@@ -1052,7 +1038,7 @@ pub struct CreateSubscriptionDefinitionVersionResponse {
/// <p>Information about a definition.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DefinitionInformation {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1070,11 +1056,11 @@ pub struct DefinitionInformation {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1082,7 +1068,7 @@ pub struct DefinitionInformation {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1096,7 +1082,7 @@ pub struct DeleteConnectorDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteConnectorDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1107,7 +1093,7 @@ pub struct DeleteCoreDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteCoreDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1118,7 +1104,7 @@ pub struct DeleteDeviceDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteDeviceDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1129,7 +1115,7 @@ pub struct DeleteFunctionDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteFunctionDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1140,7 +1126,7 @@ pub struct DeleteGroupRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteGroupResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1151,7 +1137,7 @@ pub struct DeleteLoggerDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteLoggerDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1162,7 +1148,7 @@ pub struct DeleteResourceDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteResourceDefinitionResponse {}

#[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -1173,12 +1159,12 @@ pub struct DeleteSubscriptionDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteSubscriptionDefinitionResponse {}
/// <p>Information about a deployment.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Deployment {
    /// <p>The time, in milliseconds since the epoch, when the deployment was created.</p>
    #[serde(rename = "CreatedAt")]
@@ -1207,20 +1193,17 @@ pub struct Deployment {
pub struct Device {
    /// <p>The ARN of the certificate associated with the device.</p>
    #[serde(rename = "CertificateArn")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub certificate_arn: Option<String>,
+    pub certificate_arn: String,
    /// <p>A descriptive or arbitrary ID for the device. This value must be unique within the device definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.</p>
    #[serde(rename = "Id")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub id: Option<String>,
+    pub id: String,
    /// <p>If true, the device's local shadow will be automatically synced with the cloud.</p>
    #[serde(rename = "SyncShadow")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_shadow: Option<bool>,
    /// <p>The thing ARN of the device.</p>
    #[serde(rename = "ThingArn")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub thing_arn: Option<String>,
+    pub thing_arn: String,
}

/// <p>Information about a device definition version.</p>
@@ -1240,7 +1223,7 @@ pub struct DisassociateRoleFromGroupRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DisassociateRoleFromGroupResponse {
    /// <p>The time, in milliseconds since the epoch, when the role was disassociated from the group.</p>
    #[serde(rename = "DisassociatedAt")]
@@ -1252,7 +1235,7 @@ pub struct DisassociateServiceRoleFromAccountResponse {
pub struct DisassociateServiceRoleFromAccountRequest {}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DisassociateServiceRoleFromAccountResponse {
    /// <p>The time when the service role was disassociated from the account.</p>
    #[serde(rename = "DisassociatedAt")]
@@ -1262,7 +1245,7 @@ pub struct DisassociateServiceRoleFromAccountResponse {
/// <p>Details about the error.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ErrorDetail {
    /// <p>A detailed error code.</p>
    #[serde(rename = "DetailedErrorCode")]
@@ -1287,8 +1270,7 @@ pub struct Function {
    pub function_configuration: Option<FunctionConfiguration>,
    /// <p>A descriptive or arbitrary ID for the function. This value must be unique within the function definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.</p>
    #[serde(rename = "Id")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub id: Option<String>,
+    pub id: String,
}
/// <p>The configuration of the Lambda function.</p>
@@ -1409,7 +1391,7 @@ pub struct GetAssociatedRoleRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetAssociatedRoleResponse {
    /// <p>The time when the role was associated with the group.</p>
    #[serde(rename = "AssociatedAt")]
@@ -1429,7 +1411,7 @@ pub struct GetBulkDeploymentStatusRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetBulkDeploymentStatusResponse {
    /// <p>Relevant metrics on input records processed during bulk deployment.</p>
    #[serde(rename = "BulkDeploymentMetrics")]
@@ -1451,7 +1433,7 @@ pub struct GetBulkDeploymentStatusResponse {
    #[serde(rename = "ErrorMessage")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1465,7 +1447,7 @@ pub struct GetConnectivityInfoRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetConnectivityInfoResponse {
    /// <p>Connectivity info list.</p>
    #[serde(rename = "ConnectivityInfo")]
@@ -1485,7 +1467,7 @@ pub struct GetConnectorDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetConnectorDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1503,11 +1485,11 @@ pub struct GetConnectorDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1515,7 +1497,7 @@ pub struct GetConnectorDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1526,7 +1508,7 @@ pub struct GetConnectorDefinitionVersionRequest {
    /// <p>The ID of the connector definition.</p>
    #[serde(rename = "ConnectorDefinitionId")]
    pub connector_definition_id: String,
-    /// <p>The ID of the connector definition version.</p>
+    /// <p>The ID of the connector definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListConnectorDefinitionVersions'' requests. If the version is the last one that was associated with a connector definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "ConnectorDefinitionVersionId")]
    pub connector_definition_version_id: String,
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
@@ -1536,7 +1518,7 @@ pub struct GetConnectorDefinitionVersionRequest {
}
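// Editorial sketch (not part of the diff): the expanded doc comments spell
// out that `LatestVersion` on a definition is itself a version ID you can
// feed to the matching Get*DefinitionVersion call. The `GreenGrass` trait
// name and the blocking `.sync()` style are assumptions about this rusoto
// generation; error handling is deliberately crude.
use rusoto_greengrass::{
    GetConnectorDefinitionRequest, GetConnectorDefinitionVersionRequest, GreenGrass,
};

fn fetch_latest_connector_version<C: GreenGrass>(client: &C, definition_id: &str) {
    let def = client
        .get_connector_definition(GetConnectorDefinitionRequest {
            connector_definition_id: definition_id.to_string(),
        })
        .sync()
        .expect("get_connector_definition failed");
    if let Some(version_id) = def.latest_version {
        // The LatestVersion ID maps to ConnectorDefinitionVersionId here.
        let _version = client
            .get_connector_definition_version(GetConnectorDefinitionVersionRequest {
                connector_definition_id: definition_id.to_string(),
                connector_definition_version_id: version_id,
                next_token: None,
            })
            .sync()
            .expect("get_connector_definition_version failed");
    }
}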

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetConnectorDefinitionVersionResponse {
    /// <p>The ARN of the connector definition version.</p>
    #[serde(rename = "Arn")]
@@ -1572,7 +1554,7 @@ pub struct GetCoreDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetCoreDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1590,11 +1572,11 @@ pub struct GetCoreDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1602,7 +1584,7 @@ pub struct GetCoreDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1613,13 +1595,13 @@ pub struct GetCoreDefinitionVersionRequest {
    /// <p>The ID of the core definition.</p>
    #[serde(rename = "CoreDefinitionId")]
    pub core_definition_id: String,
-    /// <p>The ID of the core definition version.</p>
+    /// <p>The ID of the core definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListCoreDefinitionVersions'' requests. If the version is the last one that was associated with a core definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "CoreDefinitionVersionId")]
    pub core_definition_version_id: String,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetCoreDefinitionVersionResponse {
    /// <p>The ARN of the core definition version.</p>
    #[serde(rename = "Arn")]
@@ -1658,7 +1640,7 @@ pub struct GetDeploymentStatusRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetDeploymentStatusResponse {
    /// <p>The status of the deployment: ''InProgress'', ''Building'', ''Success'', or ''Failure''.</p>
    #[serde(rename = "DeploymentStatus")]
@@ -1690,7 +1672,7 @@ pub struct GetDeviceDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetDeviceDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1708,11 +1690,11 @@ pub struct GetDeviceDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1720,7 +1702,7 @@ pub struct GetDeviceDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1731,7 +1713,7 @@ pub struct GetDeviceDefinitionVersionRequest {
    /// <p>The ID of the device definition.</p>
    #[serde(rename = "DeviceDefinitionId")]
    pub device_definition_id: String,
-    /// <p>The ID of the device definition version.</p>
+    /// <p>The ID of the device definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListDeviceDefinitionVersions'' requests. If the version is the last one that was associated with a device definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "DeviceDefinitionVersionId")]
    pub device_definition_version_id: String,
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
@@ -1741,7 +1723,7 @@ pub struct GetDeviceDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetDeviceDefinitionVersionResponse {
    /// <p>The ARN of the device definition version.</p>
    #[serde(rename = "Arn")]
@@ -1777,7 +1759,7 @@ pub struct GetFunctionDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetFunctionDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1795,11 +1777,11 @@ pub struct GetFunctionDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1807,7 +1789,7 @@ pub struct GetFunctionDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1818,7 +1800,7 @@ pub struct GetFunctionDefinitionVersionRequest {
    /// <p>The ID of the Lambda function definition.</p>
    #[serde(rename = "FunctionDefinitionId")]
    pub function_definition_id: String,
-    /// <p>The ID of the function definition version.</p>
+    /// <p>The ID of the function definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListFunctionDefinitionVersions'' requests. If the version is the last one that was associated with a function definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "FunctionDefinitionVersionId")]
    pub function_definition_version_id: String,
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
@@ -1828,7 +1810,7 @@ pub struct GetFunctionDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetFunctionDefinitionVersionResponse {
    /// <p>The ARN of the function definition version.</p>
    #[serde(rename = "Arn")]
@@ -1867,7 +1849,7 @@ pub struct GetGroupCertificateAuthorityRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetGroupCertificateAuthorityResponse {
    /// <p>The ARN of the certificate authority for the group.</p>
    #[serde(rename = "GroupCertificateAuthorityArn")]
@@ -1891,7 +1873,7 @@ pub struct GetGroupCertificateConfigurationRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetGroupCertificateConfigurationResponse {
    /// <p>The amount of time remaining before the certificate authority expires, in milliseconds.</p>
    #[serde(rename = "CertificateAuthorityExpiryInMilliseconds")]
@@ -1915,7 +1897,7 @@ pub struct GetGroupRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetGroupResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -1933,11 +1915,11 @@ pub struct GetGroupResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -1945,7 +1927,7 @@ pub struct GetGroupResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -1956,13 +1938,13 @@ pub struct GetGroupVersionRequest {
    /// <p>The ID of the Greengrass group.</p>
    #[serde(rename = "GroupId")]
    pub group_id: String,
-    /// <p>The ID of the group version.</p>
+    /// <p>The ID of the group version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListGroupVersions'' requests. If the version is the last one that was associated with a group, the value also maps to the ''LatestVersion'' property of the corresponding ''GroupInformation'' object.</p>
    #[serde(rename = "GroupVersionId")]
    pub group_version_id: String,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetGroupVersionResponse {
    /// <p>The ARN of the group version.</p>
    #[serde(rename = "Arn")]
@@ -1994,7 +1976,7 @@ pub struct GetLoggerDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetLoggerDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -2012,11 +1994,11 @@ pub struct GetLoggerDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -2024,7 +2006,7 @@ pub struct GetLoggerDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -2035,7 +2017,7 @@ pub struct GetLoggerDefinitionVersionRequest {
    /// <p>The ID of the logger definition.</p>
    #[serde(rename = "LoggerDefinitionId")]
    pub logger_definition_id: String,
-    /// <p>The ID of the logger definition version.</p>
+    /// <p>The ID of the logger definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListLoggerDefinitionVersions'' requests. If the version is the last one that was associated with a logger definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "LoggerDefinitionVersionId")]
    pub logger_definition_version_id: String,
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
@@ -2045,7 +2027,7 @@ pub struct GetLoggerDefinitionVersionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetLoggerDefinitionVersionResponse {
    /// <p>The ARN of the logger definition version.</p>
    #[serde(rename = "Arn")]
@@ -2077,7 +2059,7 @@ pub struct GetResourceDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetResourceDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -2095,11 +2077,11 @@ pub struct GetResourceDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -2107,7 +2089,7 @@ pub struct GetResourceDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -2118,13 +2100,13 @@ pub struct GetResourceDefinitionVersionRequest {
    /// <p>The ID of the resource definition.</p>
    #[serde(rename = "ResourceDefinitionId")]
    pub resource_definition_id: String,
-    /// <p>The ID of the resource definition version.</p>
+    /// <p>The ID of the resource definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListResourceDefinitionVersions'' requests. If the version is the last one that was associated with a resource definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "ResourceDefinitionVersionId")]
    pub resource_definition_version_id: String,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetResourceDefinitionVersionResponse {
    /// <p>Arn of the resource definition version.</p>
    #[serde(rename = "Arn")]
@@ -2152,7 +2134,7 @@ pub struct GetResourceDefinitionVersionResponse {
pub struct GetServiceRoleForAccountRequest {}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetServiceRoleForAccountResponse {
    /// <p>The time when the service role was associated with the account.</p>
    #[serde(rename = "AssociatedAt")]
@@ -2172,7 +2154,7 @@ pub struct GetSubscriptionDefinitionRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetSubscriptionDefinitionResponse {
    /// <p>The ARN of the definition.</p>
    #[serde(rename = "Arn")]
@@ -2190,11 +2172,11 @@ pub struct GetSubscriptionDefinitionResponse {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the definition.</p>
+    /// <p>The ID of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the definition.</p>
+    /// <p>The ARN of the latest version associated with the definition.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -2202,7 +2184,7 @@ pub struct GetSubscriptionDefinitionResponse {
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
-    /// <p>The tags for the definition.</p>
+    /// <p>Tag(s) attached to the resource arn.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -2217,13 +2199,13 @@ pub struct GetSubscriptionDefinitionVersionRequest {
    /// <p>The ID of the subscription definition.</p>
    #[serde(rename = "SubscriptionDefinitionId")]
    pub subscription_definition_id: String,
-    /// <p>The ID of the subscription definition version.</p>
+    /// <p>The ID of the subscription definition version. This value maps to the ''Version'' property of the corresponding ''VersionInformation'' object, which is returned by ''ListSubscriptionDefinitionVersions'' requests. If the version is the last one that was associated with a subscription definition, the value also maps to the ''LatestVersion'' property of the corresponding ''DefinitionInformation'' object.</p>
    #[serde(rename = "SubscriptionDefinitionVersionId")]
    pub subscription_definition_version_id: String,
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetSubscriptionDefinitionVersionResponse {
    /// <p>The ARN of the subscription definition version.</p>
    #[serde(rename = "Arn")]
@@ -2253,7 +2235,7 @@ pub struct GetSubscriptionDefinitionVersionResponse {
/// <p>Information about a certificate authority for a group.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GroupCertificateAuthorityProperties {
    /// <p>The ARN of the certificate authority for the group.</p>
    #[serde(rename = "GroupCertificateAuthorityArn")]
@@ -2267,7 +2249,7 @@ pub struct GroupCertificateAuthorityProperties {
/// <p>Information about a group.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GroupInformation {
    /// <p>The ARN of the group.</p>
    #[serde(rename = "Arn")]
@@ -2285,11 +2267,11 @@ pub struct GroupInformation {
    #[serde(rename = "LastUpdatedTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_timestamp: Option<String>,
-    /// <p>The latest version of the group.</p>
+    /// <p>The ID of the latest version associated with the group.</p>
    #[serde(rename = "LatestVersion")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
-    /// <p>The ARN of the latest version of the group.</p>
+    /// <p>The ARN of the latest version associated with the group.</p>
    #[serde(rename = "LatestVersionArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_version_arn: Option<String>,
@@ -2361,7 +2343,7 @@ pub struct ListBulkDeploymentDetailedReportsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListBulkDeploymentDetailedReportsResponse {
    /// <p>A list of the individual group deployments in the bulk deployment operation.</p>
    #[serde(rename = "Deployments")]
@@ -2386,7 +2368,7 @@ pub struct ListBulkDeploymentsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListBulkDeploymentsResponse {
    /// <p>A list of bulk deployments.</p>
    #[serde(rename = "BulkDeployments")]
@@ -2414,7 +2396,7 @@ pub struct ListConnectorDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListConnectorDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2439,7 +2421,7 @@ pub struct ListConnectorDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListConnectorDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2467,7 +2449,7 @@ pub struct ListCoreDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListCoreDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2492,7 +2474,7 @@ pub struct ListCoreDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListCoreDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2520,7 +2502,7 @@ pub struct ListDeploymentsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListDeploymentsResponse {
    /// <p>A list of deployments for the requested groups.</p>
    #[serde(rename = "Deployments")]
@@ -2548,7 +2530,7 @@ pub struct ListDeviceDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListDeviceDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2573,7 +2555,7 @@ pub struct ListDeviceDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListDeviceDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2601,7 +2583,7 @@ pub struct ListFunctionDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListFunctionDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2626,7 +2608,7 @@ pub struct ListFunctionDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListFunctionDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2646,7 +2628,7 @@ pub struct ListGroupCertificateAuthoritiesRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListGroupCertificateAuthoritiesResponse {
    /// <p>A list of certificate authorities associated with the group.</p>
    #[serde(rename = "GroupCertificateAuthorities")]
@@ -2670,7 +2652,7 @@ pub struct ListGroupVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListGroupVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2695,7 +2677,7 @@ pub struct ListGroupsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListGroupsResponse {
    /// <p>Information about a group.</p>
    #[serde(rename = "Groups")]
@@ -2723,7 +2705,7 @@ pub struct ListLoggerDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListLoggerDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2748,7 +2730,7 @@ pub struct ListLoggerDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListLoggerDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2776,7 +2758,7 @@ pub struct ListResourceDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListResourceDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2801,7 +2783,7 @@ pub struct ListResourceDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListResourceDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2829,7 +2811,7 @@ pub struct ListSubscriptionDefinitionVersionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListSubscriptionDefinitionVersionsResponse {
    /// <p>The token for the next set of results, or ''null'' if there are no additional results.</p>
    #[serde(rename = "NextToken")]
@@ -2854,7 +2836,7 @@ pub struct ListSubscriptionDefinitionsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListSubscriptionDefinitionsResponse {
    /// <p>Information about a definition.</p>
    #[serde(rename = "Definitions")]
@@ -2874,9 +2856,8 @@ pub struct ListTagsForResourceRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListTagsForResourceResponse {
-    /// <p>A map of the key-value pairs for the resource tag.</p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
@@ -2917,24 +2898,20 @@ pub struct LocalVolumeResourceData {
pub struct Logger {
    /// <p>The component that will be subject to logging.</p>
    #[serde(rename = "Component")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub component: Option<String>,
+    pub component: String,
    /// <p>A descriptive or arbitrary ID for the logger. This value must be unique within the logger definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.</p>
    #[serde(rename = "Id")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub id: Option<String>,
+    pub id: String,
    /// <p>The level of the logs.</p>
    #[serde(rename = "Level")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub level: Option<String>,
+    pub level: String,
    /// <p>The amount of file space, in KB, to use if the local file system is used for logging purposes.</p>
    #[serde(rename = "Space")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub space: Option<i64>,
    /// <p>The type of log output which will be used.</p>
    #[serde(rename = "Type")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub type_: Option<String>,
+    pub type_: String,
}

/// <p>Information about a logger definition version.</p>
@@ -2963,7 +2940,7 @@ pub struct ResetDeploymentsRequest {
}

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ResetDeploymentsResponse {
    /// <p>The ARN of the deployment.</p>
    #[serde(rename = "DeploymentArn")]
@@ -2980,16 +2957,13 @@ pub struct ResetDeploymentsResponse {
pub struct Resource {
    /// <p>The resource ID, used to refer to a resource in the Lambda function configuration. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''. This must be unique within a Greengrass group.</p>
    #[serde(rename = "Id")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub id: Option<String>,
+    pub id: String,
    /// <p>The descriptive resource name, which is displayed on the AWS IoT Greengrass console. Max length 128 characters with pattern ''[a-zA-Z0-9:_-]+''. This must be unique within a Greengrass group.</p>
    #[serde(rename = "Name")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub name: Option<String>,
+    pub name: String,
    /// <p>A container of data for all resource types.</p>
    #[serde(rename = "ResourceDataContainer")]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub resource_data_container: Option<ResourceDataContainer>,
+    pub resource_data_container: ResourceDataContainer,
}

A policy used by the function to access a resource.

@@ -3001,8 +2975,7 @@ pub struct ResourceAccessPolicy { pub permission: Option, ///
The ID of the resource. (This ID is assigned to the resource when you create the resource definition.)
#[serde(rename = "ResourceId")] - #[serde(skip_serializing_if = "Option::is_none")] - pub resource_id: Option, + pub resource_id: String, } ///
A container for resource data. The container takes only one of the following supported resource data types: ''LocalDeviceResourceData'', ''LocalVolumeResourceData'', ''SageMakerMachineLearningModelResourceData'', ''S3MachineLearningModelResourceData'', ''SecretsManagerSecretResourceData''.
@@ -3087,20 +3060,18 @@ pub struct StartBulkDeploymentRequest { pub amzn_client_token: Option, ///
The ARN of the execution role to associate with the bulk deployment operation. This IAM role must allow the ''greengrass:CreateDeployment'' action for all group versions that are listed in the input file. This IAM role must have access to the S3 bucket containing the input file.
#[serde(rename = "ExecutionRoleArn")] - #[serde(skip_serializing_if = "Option::is_none")] - pub execution_role_arn: Option, + pub execution_role_arn: String, ///
The URI of the input file contained in the S3 bucket. The execution role must have ''getObject'' permissions on this bucket to access the input file. The input file is a JSON-serialized, line delimited file with UTF-8 encoding that provides a list of group and version IDs and the deployment type. This file must be less than 100 MB. Currently, AWS IoT Greengrass supports only ''NewDeployment'' deployment types.
#[serde(rename = "InputFileUri")] - #[serde(skip_serializing_if = "Option::is_none")] - pub input_file_uri: Option, - ///
Tag(s) to add to the new resource
+ pub input_file_uri: String, + ///
Tag(s) to add to the new resource.
#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartBulkDeploymentResponse { ///
The ARN of the bulk deployment.
#[serde(rename = "BulkDeploymentArn")] @@ -3120,7 +3091,7 @@ pub struct StopBulkDeploymentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopBulkDeploymentResponse {} ///
Information about a subscription.
@@ -3128,20 +3099,16 @@ pub struct StopBulkDeploymentResponse {} pub struct Subscription { ///
A descriptive or arbitrary ID for the subscription. This value must be unique within the subscription definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.
#[serde(rename = "Id")] - #[serde(skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: String, ///
The source of the subscription. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
#[serde(rename = "Source")] - #[serde(skip_serializing_if = "Option::is_none")] - pub source: Option, + pub source: String, ///
The MQTT topic used to route the message.
#[serde(rename = "Subject")] - #[serde(skip_serializing_if = "Option::is_none")] - pub subject: Option, + pub subject: String, ///
Where the message is sent to. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
#[serde(rename = "Target")] - #[serde(skip_serializing_if = "Option::is_none")] - pub target: Option, + pub target: String, } ///
Information about a subscription definition version.
@@ -3153,14 +3120,15 @@ pub struct SubscriptionDefinitionVersion { pub subscriptions: Option>, } +///
A map of the key-value pairs for the resource tag.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct TagResourceRequest { ///
The Amazon Resource Name (ARN) of the resource.
#[serde(rename = "ResourceArn")] pub resource_arn: String, - ///
A map of the key-value pairs for the resource tag.
#[serde(rename = "tags")] - pub tags: ::std::collections::HashMap, + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3168,7 +3136,7 @@ pub struct UntagResourceRequest { ///
The Amazon Resource Name (ARN) of the resource.
#[serde(rename = "ResourceArn")] pub resource_arn: String, - ///
A list of the keys to remove from the resource tags.
+ ///
An array of tag keys to delete.
#[serde(rename = "TagKeys")] pub tag_keys: Vec, } @@ -3186,7 +3154,7 @@ pub struct UpdateConnectivityInfoRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConnectivityInfoResponse { ///
A message about the connectivity info update request.
#[serde(rename = "Message")] @@ -3210,7 +3178,7 @@ pub struct UpdateConnectorDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConnectorDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3225,7 +3193,7 @@ pub struct UpdateCoreDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCoreDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3240,7 +3208,7 @@ pub struct UpdateDeviceDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeviceDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3255,7 +3223,7 @@ pub struct UpdateFunctionDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFunctionDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3270,7 +3238,7 @@ pub struct UpdateGroupCertificateConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupCertificateConfigurationResponse { ///
The amount of time remaining before the certificate authority expires, in milliseconds.
#[serde(rename = "CertificateAuthorityExpiryInMilliseconds")] @@ -3298,7 +3266,7 @@ pub struct UpdateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3313,7 +3281,7 @@ pub struct UpdateLoggerDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateLoggerDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3328,7 +3296,7 @@ pub struct UpdateResourceDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResourceDefinitionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3343,12 +3311,12 @@ pub struct UpdateSubscriptionDefinitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSubscriptionDefinitionResponse {} ///
Information about a version.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VersionInformation { ///
The ARN of the version.
#[serde(rename = "Arn")] @@ -6961,7 +6929,7 @@ pub trait GreenGrass { input: ListSubscriptionDefinitionsRequest, ) -> RusotoFuture; - ///
Retrieves the tags for a resource.
+ ///
Retrieves a list of resource tags for a resource ARN.
fn list_tags_for_resource( &self, input: ListTagsForResourceRequest, @@ -6985,10 +6953,10 @@ pub trait GreenGrass { input: StopBulkDeploymentRequest, ) -> RusotoFuture; - ///
Add tags to a resource.
+ ///
Add resource tags to a Greengrass Resource. Valid resources are Group, Connector, Core, Device, Function, Logger, Subscription, and Resource Definitions, as well as BulkDeploymentIds.
fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError>; - ///
Remove tags with specified keys from a resource.
+ ///
Remove resource tags from a Greengrass Resource.
fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError>; ///
Updates the connectivity information for the core. Any devices that belong to the group which has this core will receive this information in order to find the location of the core and connect to it.
@@ -7066,10 +7034,7 @@ impl GreenGrassClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> GreenGrassClient { - GreenGrassClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7083,10 +7048,14 @@ impl GreenGrassClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - GreenGrassClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> GreenGrassClient { + GreenGrassClient { client, region } } } @@ -9601,7 +9570,7 @@ impl GreenGrass for GreenGrassClient { }) } - ///
Retrieves the tags for a resource.
+ ///
Retrieves a list of resource tags for a resource ARN.
fn list_tags_for_resource( &self, input: ListTagsForResourceRequest, @@ -9735,7 +9704,7 @@ impl GreenGrass for GreenGrassClient { }) } - ///
Add tags to a resource.
+ ///
Add resource tags to a Greengrass Resource. Valid resources are Group, Connector, Core, Device, Function, Logger, Subscription, and Resource Definitions, as well as BulkDeploymentIds.
fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError> { let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn); @@ -9763,7 +9732,7 @@ impl GreenGrass for GreenGrassClient { }) } - ///
Remove tags with specified keys from a resource.
+ ///
Remove resource tags from a Greengrass Resource.
fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError> { let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn); diff --git a/rusoto/services/guardduty/Cargo.toml b/rusoto/services/guardduty/Cargo.toml index 704792291ab..2f0139a10b4 100644 --- a/rusoto/services/guardduty/Cargo.toml +++ b/rusoto/services/guardduty/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_guardduty" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/guardduty/README.md b/rusoto/services/guardduty/README.md index 3f333cc1998..5bd07362ba5 100644 --- a/rusoto/services/guardduty/README.md +++ b/rusoto/services/guardduty/README.md @@ -23,9 +23,16 @@ To use `rusoto_guardduty` in your application, add it as a dependency in your `C ```toml [dependencies] -rusoto_guardduty = "0.40.0" +rusoto_guardduty = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/guardduty/src/custom/mod.rs b/rusoto/services/guardduty/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/guardduty/src/custom/mod.rs +++ b/rusoto/services/guardduty/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/guardduty/src/generated.rs b/rusoto/services/guardduty/src/generated.rs index 9afd93ef2a7..08e27e5f5d1 100644 --- a/rusoto/services/guardduty/src/generated.rs +++ b/rusoto/services/guardduty/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -39,11 +38,12 @@ pub struct AcceptInvitationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcceptInvitationResponse {} +///
Contains information about the access keys.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccessKeyDetails { ///
Access key ID of the user.
#[serde(rename = "AccessKeyId")] @@ -63,6 +63,7 @@ pub struct AccessKeyDetails { pub user_type: Option, } +///
Contains information about the account.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct AccountDetail { ///
Member account ID.
@@ -73,8 +74,9 @@ pub struct AccountDetail { pub email: String, } +///
Contains information about the action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Action { ///
GuardDuty Finding activity type.
#[serde(rename = "ActionType")] @@ -109,11 +111,12 @@ pub struct ArchiveFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ArchiveFindingsResponse {} +///
Contains information about the API operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AwsApiCallAction { ///
AWS API name.
#[serde(rename = "Api")] @@ -137,8 +140,9 @@ pub struct AwsApiCallAction { pub service_name: Option, } +///
Contains information about the city associated with the IP address.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct City { ///
City name of the remote IP address.
#[serde(rename = "CityName")] @@ -146,8 +150,10 @@ pub struct City { pub city_name: Option, } +///
Contains information about the condition.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Condition { + ///
Represents an equal condition to be applied to a single field when querying for findings.
#[serde(rename = "Equals")] #[serde(skip_serializing_if = "Option::is_none")] pub equals: Option>, @@ -167,13 +173,15 @@ pub struct Condition { #[serde(rename = "LessThanOrEqual")] #[serde(skip_serializing_if = "Option::is_none")] pub less_than_or_equal: Option, + ///
Represents a not equal condition to be applied to a single field when querying for findings.
#[serde(rename = "NotEquals")] #[serde(skip_serializing_if = "Option::is_none")] pub not_equals: Option>, } +///
Contains information about the country.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Country { ///
Country code of the remote IP address.
#[serde(rename = "CountryCode")] @@ -198,10 +206,14 @@ pub struct CreateDetectorRequest { #[serde(rename = "FindingPublishingFrequency")] #[serde(skip_serializing_if = "Option::is_none")] pub finding_publishing_frequency: Option, + ///
The tags to be added to a new detector resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDetectorResponse { ///
The unique ID of the created detector.
#[serde(rename = "DetectorId")] @@ -236,10 +248,14 @@ pub struct CreateFilterRequest { #[serde(rename = "Rank")] #[serde(skip_serializing_if = "Option::is_none")] pub rank: Option, + ///
The tags to be added to a new filter resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFilterResponse { ///
The name of the successfully created filter.
#[serde(rename = "Name")] @@ -267,10 +283,14 @@ pub struct CreateIPSetRequest { ///
The user-friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.
#[serde(rename = "Name")] pub name: String, + ///
The tags to be added to a new IP set resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIPSetResponse { ///
The ID of the IPSet resource.
#[serde(rename = "IpSetId")] @@ -288,7 +308,7 @@ pub struct CreateMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateMembersResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] @@ -307,7 +327,7 @@ pub struct CreateSampleFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSampleFindingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -331,10 +351,14 @@ pub struct CreateThreatIntelSetRequest { ///
A user-friendly ThreatIntelSet name that is displayed in all findings generated by activity that involves IP addresses included in this ThreatIntelSet.
#[serde(rename = "Name")] pub name: String, + ///
The tags to be added to a new Threat List resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateThreatIntelSetResponse { ///
The ID of the ThreatIntelSet resource.
#[serde(rename = "ThreatIntelSetId")] @@ -349,7 +373,7 @@ pub struct DeclineInvitationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeclineInvitationsResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] @@ -364,7 +388,7 @@ pub struct DeleteDetectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDetectorResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -378,7 +402,7 @@ pub struct DeleteFilterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFilterResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -392,7 +416,7 @@ pub struct DeleteIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteIPSetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -403,7 +427,7 @@ pub struct DeleteInvitationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInvitationsResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] @@ -421,9 +445,9 @@ pub struct DeleteMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteMembersResponse { - ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
+ ///
The accounts that could not be processed.
#[serde(rename = "UnprocessedAccounts")] pub unprocessed_accounts: Vec, } @@ -439,7 +463,7 @@ pub struct DeleteThreatIntelSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteThreatIntelSetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -450,7 +474,7 @@ pub struct DisassociateFromMasterAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateFromMasterAccountResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -464,15 +488,16 @@ pub struct DisassociateMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateMembersResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] pub unprocessed_accounts: Vec, } +///
Contains information about the DNS request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DnsRequestAction { ///
Domain information for the DNS request.
#[serde(rename = "Domain")] @@ -480,8 +505,9 @@ pub struct DnsRequestAction { pub domain: Option, } +///
Contains information about the domain.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainDetails { ///
Domain information for the AWS API call.
#[serde(rename = "Domain")] @@ -489,61 +515,71 @@ pub struct DomainDetails { pub domain: Option, } +///
Contains information about the reason that the finding was generated.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Evidence { + ///
A list of threat intelligence details related to the evidence.
+ #[serde(rename = "ThreatIntelligenceDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub threat_intelligence_details: Option>, +} + +///
Contains information about the finding.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Finding { - ///
AWS account ID where the activity occurred that prompted GuardDuty to generate a finding.
+ ///
The ID of the account in which the finding was generated.
#[serde(rename = "AccountId")] pub account_id: String, - ///
The ARN of a finding described by the action.
+ ///
The ARN for the finding.
#[serde(rename = "Arn")] pub arn: String, - ///
The confidence level of a finding.
+ ///
The confidence score for the finding.
#[serde(rename = "Confidence")] #[serde(skip_serializing_if = "Option::is_none")] pub confidence: Option, - ///
The time stamp at which a finding was generated.
+ ///
The time and date at which the finding was created.
#[serde(rename = "CreatedAt")] pub created_at: String, - ///
The description of a finding.
+ ///
The description of the finding.
#[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, - ///
The identifier that corresponds to a finding described by the action.
+ ///
The ID of the finding.
#[serde(rename = "Id")] pub id: String, - ///
The AWS resource partition.
+ ///
The partition associated with the finding.
#[serde(rename = "Partition")] #[serde(skip_serializing_if = "Option::is_none")] pub partition: Option, - ///
The AWS region where the activity occurred that prompted GuardDuty to generate a finding.
+ ///
The Region in which the finding was generated.
#[serde(rename = "Region")] pub region: String, - ///
The AWS resource associated with the activity that prompted GuardDuty to generate a finding.
#[serde(rename = "Resource")] pub resource: Resource, - ///
Findings' schema version.
+ ///
The version of the schema used for the finding.
#[serde(rename = "SchemaVersion")] pub schema_version: String, - ///
Additional information assigned to the generated finding by GuardDuty.
#[serde(rename = "Service")] #[serde(skip_serializing_if = "Option::is_none")] pub service: Option, - ///
The severity of a finding.
+ ///
The severity of the finding.
#[serde(rename = "Severity")] pub severity: f64, - ///
The title of a finding.
+ ///
The title for the finding.
#[serde(rename = "Title")] #[serde(skip_serializing_if = "Option::is_none")] pub title: Option, - ///
The type of a finding described by the action.
+ ///
The type of the finding.
#[serde(rename = "Type")] pub type_: String, - ///
The time stamp at which a finding was last updated.
+ ///
The time and date at which the finding was last updated.
#[serde(rename = "UpdatedAt")] pub updated_at: String, } +///
Contains finding criteria information.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct FindingCriteria { ///
Represents a map of finding properties that match specified conditions and values when querying findings.
@@ -552,8 +588,9 @@ pub struct FindingCriteria { pub criterion: Option<::std::collections::HashMap>, } +///
Contains information about finding statistics.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FindingStatistics { ///
Represents a map of severity to count statistic for a set of findings
#[serde(rename = "CountBySeverity")] @@ -561,8 +598,9 @@ pub struct FindingStatistics { pub count_by_severity: Option<::std::collections::HashMap>, } +///
Contains information about the location of the remote IP address.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GeoLocation { ///
Latitude information of remote IP address.
#[serde(rename = "Lat")] @@ -582,7 +620,7 @@ pub struct GetDetectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDetectorResponse { ///
Detector creation timestamp.
#[serde(rename = "CreatedAt")] @@ -598,6 +636,10 @@ pub struct GetDetectorResponse { ///
The detector status.
#[serde(rename = "Status")] pub status: String, + ///
The tags of the detector resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, ///
Detector last update timestamp.
#[serde(rename = "UpdatedAt")] #[serde(skip_serializing_if = "Option::is_none")] @@ -615,7 +657,7 @@ pub struct GetFilterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFilterResponse { ///
Specifies the action that is to be applied to the findings that match the filter.
#[serde(rename = "Action")] @@ -634,6 +676,10 @@ pub struct GetFilterResponse { #[serde(rename = "Rank")] #[serde(skip_serializing_if = "Option::is_none")] pub rank: Option, + ///
The tags of the filter resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -651,7 +697,7 @@ pub struct GetFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFindingsResponse { ///
A list of findings.
#[serde(rename = "Findings")] @@ -673,7 +719,7 @@ pub struct GetFindingsStatisticsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFindingsStatisticsResponse { ///
Finding statistics object.
#[serde(rename = "FindingStatistics")] @@ -691,7 +737,7 @@ pub struct GetIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIPSetResponse { ///
The format of the file that contains the IPSet.
#[serde(rename = "Format")] @@ -705,13 +751,17 @@ pub struct GetIPSetResponse { ///
The status of the uploaded IPSet file.
#[serde(rename = "Status")] pub status: String, + ///
The tags of the IP set resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetInvitationsCountRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInvitationsCountResponse { ///
The number of received invitations.
#[serde(rename = "InvitationsCount")] @@ -727,7 +777,7 @@ pub struct GetMasterAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMasterAccountResponse { ///
Master account details.
#[serde(rename = "Master")] @@ -745,7 +795,7 @@ pub struct GetMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMembersResponse { ///
A list of members.
#[serde(rename = "Members")] @@ -766,7 +816,7 @@ pub struct GetThreatIntelSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetThreatIntelSetResponse { ///
The format of the threatIntelSet.
#[serde(rename = "Format")] @@ -780,10 +830,15 @@ pub struct GetThreatIntelSetResponse { ///
The status of the uploaded threatIntelSet file.
#[serde(rename = "Status")] pub status: String, + ///
The tags of the Threat List resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, } +///
Contains information about the instance profile.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IamInstanceProfile { ///
AWS EC2 instance profile ARN.
#[serde(rename = "Arn")] @@ -795,8 +850,9 @@ pub struct IamInstanceProfile { pub id: Option, } +///
Contains information about the details of an instance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceDetails { ///
The availability zone of the EC2 instance.
#[serde(rename = "AvailabilityZone")] @@ -848,8 +904,9 @@ pub struct InstanceDetails { pub tags: Option>, } +///
Contains information about the invitation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Invitation { ///
Inviter account ID
#[serde(rename = "AccountId")] @@ -888,7 +945,7 @@ pub struct InviteMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InviteMembersResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] @@ -908,7 +965,7 @@ pub struct ListDetectorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDetectorsResponse { ///
A list of detector Ids.
#[serde(rename = "DetectorIds")] @@ -935,7 +992,7 @@ pub struct ListFiltersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFiltersResponse { ///
A list of filter names
#[serde(rename = "FilterNames")] @@ -970,7 +1027,7 @@ pub struct ListFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFindingsResponse { ///
The IDs of the findings you are listing.
#[serde(rename = "FindingIds")] @@ -997,7 +1054,7 @@ pub struct ListIPSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIPSetsResponse { ///
The IDs of the IPSet resources.
#[serde(rename = "IpSetIds")] @@ -1021,7 +1078,7 @@ pub struct ListInvitationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInvitationsResponse { ///
A list of invitation descriptions.
#[serde(rename = "Invitations")] @@ -1053,7 +1110,7 @@ pub struct ListMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListMembersResponse { ///
A list of members.
#[serde(rename = "Members")] @@ -1065,6 +1122,22 @@ pub struct ListMembersResponse { pub next_token: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceRequest { + ///
The Amazon Resource Name (ARN) for the given GuardDuty resource
+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceResponse { + ///
The tags associated with the resource.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListThreatIntelSetsRequest { ///
The unique ID of the detector the threatIntelSet is associated with.
@@ -1081,7 +1154,7 @@ pub struct ListThreatIntelSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThreatIntelSetsResponse { ///
Pagination parameter to be used on the next list operation to retrieve more items.
#[serde(rename = "NextToken")] @@ -1092,8 +1165,9 @@ pub struct ListThreatIntelSetsResponse { pub threat_intel_set_ids: Vec, } +///
Contains information about the port for the local connection.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LocalPortDetails { ///
Port number of the local connection.
#[serde(rename = "Port")] @@ -1105,10 +1179,11 @@ pub struct LocalPortDetails { pub port_name: Option, } +///
Contains information about the Master account and invitation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Master { - ///
Master account ID
+ ///
The ID of the account used as the Master account.
#[serde(rename = "AccountId")] #[serde(skip_serializing_if = "Option::is_none")] pub account_id: Option, @@ -1116,7 +1191,7 @@ pub struct Master { #[serde(rename = "InvitationId")] #[serde(skip_serializing_if = "Option::is_none")] pub invitation_id: Option, - ///
Timestamp at which the invitation was sent
+ ///
Timestamp at which the invitation was sent.
#[serde(rename = "InvitedAt")] #[serde(skip_serializing_if = "Option::is_none")] pub invited_at: Option, @@ -1126,8 +1201,9 @@ pub struct Master { pub relationship_status: Option, } +///
Contains information about the member account.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Member { ///
Member account ID.
#[serde(rename = "AccountId")] @@ -1154,8 +1230,9 @@ pub struct Member { pub updated_at: String, } +///
Contains information about the network connection.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkConnectionAction { ///
Network connection blocked information.
#[serde(rename = "Blocked")] @@ -1183,8 +1260,9 @@ pub struct NetworkConnectionAction { pub remote_port_details: Option, } +///
Contains information about the network interface.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkInterface { ///
A list of EC2 instance IPv6 address information.
#[serde(rename = "Ipv6Addresses")] @@ -1228,8 +1306,9 @@ pub struct NetworkInterface { pub vpc_id: Option, } +///
Contains information about the organization.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Organization { ///
Autonomous system number of the internet provider of the remote IP address.
#[serde(rename = "Asn")] @@ -1249,8 +1328,9 @@ pub struct Organization { pub org: Option, } +///
Contains information about the port probe.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PortProbeAction { ///
Port probe blocked information.
#[serde(rename = "Blocked")] @@ -1262,8 +1342,9 @@ pub struct PortProbeAction { pub port_probe_details: Option>, } +///
Contains information about the port probe details.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PortProbeDetail { ///
Local port information of the connection.
#[serde(rename = "LocalPortDetails")] @@ -1275,8 +1356,9 @@ pub struct PortProbeDetail { pub remote_ip_details: Option, } +///
Contains information about the private IP address.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PrivateIpAddressDetails { ///
Private DNS name of the EC2 instance.
#[serde(rename = "PrivateDnsName")] @@ -1288,8 +1370,9 @@ pub struct PrivateIpAddressDetails { pub private_ip_address: Option, } +///
Contains information about the product code.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProductCode { ///
Product code information.
#[serde(rename = "Code")] @@ -1301,8 +1384,9 @@ pub struct ProductCode { pub product_type: Option, } +///
Contains information about the remote IP address.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoteIpDetails { ///
City information of the remote IP address.
#[serde(rename = "City")] @@ -1326,8 +1410,9 @@ pub struct RemoteIpDetails { pub organization: Option, } +///
Contains information about the remote port.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemotePortDetails { ///
Port number of the remote connection.
#[serde(rename = "Port")] @@ -1339,8 +1424,9 @@ pub struct RemotePortDetails { pub port_name: Option, } +///
Contains information about the resource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///
The IAM access key details (IAM user information) of a user that engaged in the activity that prompted GuardDuty to generate a finding.
#[serde(rename = "AccessKeyDetails")] @@ -1356,8 +1442,9 @@ pub struct Resource { pub resource_type: Option, } +///
Contains information about the security group.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityGroup { ///
EC2 instance's security group ID.
#[serde(rename = "GroupId")] @@ -1369,8 +1456,9 @@ pub struct SecurityGroup { pub group_name: Option, } +///
Contains information about the service.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Service { ///
Information about the activity described in a finding.
#[serde(rename = "Action")] @@ -1396,6 +1484,10 @@ pub struct Service { #[serde(rename = "EventLastSeen")] #[serde(skip_serializing_if = "Option::is_none")] pub event_last_seen: Option, + ///
An evidence object associated with the service.
+ #[serde(rename = "Evidence")] + #[serde(skip_serializing_if = "Option::is_none")] + pub evidence: Option, ///
Resource role information for this finding.
#[serde(rename = "ResourceRole")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1410,6 +1502,7 @@ pub struct Service { pub user_feedback: Option, } +///
Contains information about the criteria for sorting.
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SortCriteria { ///
Represents the finding attribute (for example, accountId) by which to sort findings.
@@ -1433,7 +1526,7 @@ pub struct StartMonitoringMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartMonitoringMembersResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] @@ -1451,15 +1544,16 @@ pub struct StopMonitoringMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopMonitoringMembersResponse { ///
A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.
#[serde(rename = "UnprocessedAccounts")] pub unprocessed_accounts: Vec, } +///
Contains information about the tag associated with the resource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Tag { ///
EC2 instance tag key.
#[serde(rename = "Key")] @@ -1471,6 +1565,34 @@ pub struct Tag { pub value: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + ///
The Amazon Resource Name (ARN) for the given GuardDuty resource
+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///
The tags to be added to a resource.
+ #[serde(rename = "Tags")] + pub tags: ::std::collections::HashMap, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + +///
An instance of a threat intelligence detail that constitutes evidence for the finding.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ThreatIntelligenceDetail { + ///
The name of the threat intelligence list that triggered the finding.
+ #[serde(rename = "ThreatListName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub threat_list_name: Option, + ///
A list of names of the threats in the threat intelligence list that triggered the finding.
+ #[serde(rename = "ThreatNames")] + #[serde(skip_serializing_if = "Option::is_none")] + pub threat_names: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UnarchiveFindingsRequest { ///
The ID of the detector that specifies the GuardDuty service whose findings you want to unarchive.
@@ -1482,11 +1604,12 @@ pub struct UnarchiveFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnarchiveFindingsResponse {} +///
Contains information about the accounts that were not processed.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedAccount { ///
AWS Account ID.
#[serde(rename = "AccountId")] @@ -1496,6 +1619,20 @@ pub struct UnprocessedAccount { pub result: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + ///
The Amazon Resource Name (ARN) for the given GuardDuty resource
+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///
The tag keys to remove from a resource.
+ #[serde(rename = "TagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDetectorRequest { ///
The unique ID of the detector that you want to update.
@@ -1512,7 +1649,7 @@ pub struct UpdateDetectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDetectorResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1542,7 +1679,7 @@ pub struct UpdateFilterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFilterResponse { ///
The name of the filter.
#[serde(rename = "Name")] @@ -1567,7 +1704,7 @@ pub struct UpdateFindingsFeedbackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFindingsFeedbackResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1593,7 +1730,7 @@ pub struct UpdateIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIPSetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1619,7 +1756,7 @@ pub struct UpdateThreatIntelSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateThreatIntelSetResponse {} /// Errors returned by AcceptInvitation @@ -2937,6 +3074,47 @@ impl Error for ListMembersError { } } } +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + ///
Bad request exception object.
+ BadRequest(String), + ///
Internal server error exception object.
+ InternalServerError(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(ListTagsForResourceError::InternalServerError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::BadRequest(ref cause) => cause, + ListTagsForResourceError::InternalServerError(ref cause) => cause, + } + } +} /// Errors returned by ListThreatIntelSets #[derive(Debug, PartialEq)] pub enum ListThreatIntelSetsError { @@ -3060,6 +3238,45 @@ impl Error for StopMonitoringMembersError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///
Bad request exception object.
+ BadRequest(String), + ///
Internal server error exception object.
+ InternalServerError(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(TagResourceError::BadRequest(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(TagResourceError::InternalServerError(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::BadRequest(ref cause) => cause, + TagResourceError::InternalServerError(ref cause) => cause, + } + } +} /// Errors returned by UnarchiveFindings #[derive(Debug, PartialEq)] pub enum UnarchiveFindingsError { @@ -3101,6 +3318,45 @@ impl Error for UnarchiveFindingsError { } } } +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///
Bad request exception object.
+ BadRequest(String), + ///
Internal server error exception object.
+ InternalServerError(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(UntagResourceError::BadRequest(err.msg)) + } + "InternalServerErrorException" => { + return RusotoError::Service(UntagResourceError::InternalServerError(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::BadRequest(ref cause) => cause, + UntagResourceError::InternalServerError(ref cause) => cause, + } + } +} /// Errors returned by UpdateDetector #[derive(Debug, PartialEq)] pub enum UpdateDetectorError { @@ -3308,13 +3564,13 @@ pub trait GuardDuty { input: AcceptInvitationRequest, ) -> RusotoFuture; - ///
Archives Amazon GuardDuty findings specified by the list of finding IDs.
+ ///
Archives GuardDuty findings specified by the list of finding IDs.
Only the master account can archive findings. Member accounts do not have permission to archive findings from their accounts.
fn archive_findings( &self, input: ArchiveFindingsRequest, ) -> RusotoFuture; - ///
Creates a single Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector must be created in order for GuardDuty to become operational.
+ ///
Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each region where you enable the service. You can have only one detector per account per region.
fn create_detector( &self, input: CreateDetectorRequest, @@ -3436,7 +3692,7 @@ pub trait GuardDuty { &self, ) -> RusotoFuture; - ///
Provides the details for the GuardDuty master account to the current GuardDuty member account.
+ ///
Provides the details for the GuardDuty master account associated with the current GuardDuty member account.
fn get_master_account( &self, input: GetMasterAccountRequest, @@ -3496,6 +3752,12 @@ pub trait GuardDuty { input: ListMembersRequest, ) -> RusotoFuture; + ///

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture; + ///

Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID.

fn list_threat_intel_sets( &self, @@ -3514,12 +3776,24 @@ pub trait GuardDuty { input: StopMonitoringMembersRequest, ) -> RusotoFuture; + ///

Adds tags to a resource.

+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture; + ///

Unarchives Amazon GuardDuty findings specified by the list of finding IDs.

fn unarchive_findings( &self, input: UnarchiveFindingsRequest, ) -> RusotoFuture; + ///

Removes tags from a resource.
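The three tagging operations above share the same `resource_arn`-keyed shape. A sketch of a tag/list/untag round trip against a hypothetical detector ARN, assuming the `.sync()` adapter:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_guardduty::{
    GuardDuty, GuardDutyClient, ListTagsForResourceRequest, TagResourceRequest,
    UntagResourceRequest,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = GuardDutyClient::new(Region::UsEast1);
    // Placeholder ARN; tagging covers detectors, finding filters, IP sets,
    // and Threat Intel sets, up to 50 tags per resource.
    let arn = "arn:aws:guardduty:us-east-1:123456789012:detector/EXAMPLE".to_string();

    let mut tags = HashMap::new();
    tags.insert("team".to_string(), "security".to_string());
    client
        .tag_resource(TagResourceRequest {
            resource_arn: arn.clone(),
            tags,
        })
        .sync()?;

    let listed = client
        .list_tags_for_resource(ListTagsForResourceRequest {
            resource_arn: arn.clone(),
        })
        .sync()?;
    println!("tags: {:?}", listed.tags);

    client
        .untag_resource(UntagResourceRequest {
            resource_arn: arn,
            tag_keys: vec!["team".to_string()],
        })
        .sync()?;
    Ok(())
}
```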

+ fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture; + ///

Updates an Amazon GuardDuty detector specified by the detectorId.

fn update_detector( &self, @@ -3562,10 +3836,7 @@ impl GuardDutyClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> GuardDutyClient { - GuardDutyClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3579,10 +3850,14 @@ impl GuardDutyClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - GuardDutyClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> GuardDutyClient { + GuardDutyClient { client, region } } } @@ -3622,7 +3897,7 @@ impl GuardDuty for GuardDutyClient { }) } - ///

Archives Amazon GuardDuty findings specified by the list of finding IDs.

+ ///

Archives GuardDuty findings specified by the list of finding IDs.

Only the master account can archive findings. Member accounts do not have permission to archive findings from their accounts.

fn archive_findings( &self, input: ArchiveFindingsRequest, @@ -3657,7 +3932,7 @@ impl GuardDuty for GuardDutyClient { }) } - ///

Creates a single Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector must be created in order for GuardDuty to become operational.

+ ///

Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each Region where you enable the service. You can have only one detector per account per Region.

fn create_detector( &self, input: CreateDetectorRequest, @@ -4340,7 +4615,7 @@ impl GuardDuty for GuardDutyClient { }) } - ///

Provides the details for the GuardDuty master account to the current GuardDuty member account.

+ ///

Provides the details for the GuardDuty master account associated with the current GuardDuty member account.

fn get_master_account( &self, input: GetMasterAccountRequest, @@ -4712,6 +4987,34 @@ impl GuardDuty for GuardDutyClient { }) } + ///

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture { + let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("GET", "guardduty", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 200 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListTagsForResourceError::from_response(response)) + }), + ) + } + }) + } + ///

Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID.

fn list_threat_intel_sets( &self, @@ -4820,6 +5123,38 @@ impl GuardDuty for GuardDutyClient { }) } + ///

Adds tags to a resource.

+ fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture { + let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("POST", "guardduty", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let encoded = Some(serde_json::to_vec(&input).unwrap()); + request.set_payload(encoded); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 204 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(TagResourceError::from_response(response))), + ) + } + }) + } + ///

Unarchives Amazon GuardDuty findings specified by the list of finding IDs.

fn unarchive_findings( &self, @@ -4855,6 +5190,41 @@ impl GuardDuty for GuardDutyClient { }) } + ///

Removes tags from a resource.

+ fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture { + let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn); + + let mut request = SignedRequest::new("DELETE", "guardduty", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + let mut params = Params::new(); + for item in input.tag_keys.iter() { + params.put("tagKeys", item); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.as_u16() == 204 { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UntagResourceError::from_response(response))), + ) + } + }) + } + ///

Updates an Amazon GuardDuty detector specified by the detectorId.

fn update_detector( &self, diff --git a/rusoto/services/health/Cargo.toml b/rusoto/services/health/Cargo.toml index 500e14f6def..84661dc04dd 100644 --- a/rusoto/services/health/Cargo.toml +++ b/rusoto/services/health/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_health" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/health/README.md b/rusoto/services/health/README.md index 375d9fc2a4c..87df1385dd3 100644 --- a/rusoto/services/health/README.md +++ b/rusoto/services/health/README.md @@ -23,9 +23,16 @@ To use `rusoto_health` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_health = "0.40.0" +rusoto_health = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/health/src/custom/mod.rs b/rusoto/services/health/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/health/src/custom/mod.rs +++ b/rusoto/services/health/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/health/src/generated.rs b/rusoto/services/health/src/generated.rs index 53c800dccf0..7d7ecb4ceb8 100644 --- a/rusoto/services/health/src/generated.rs +++ b/rusoto/services/health/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Information about an entity that is affected by a Health event.
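The `serialize_structs` feature advertised in the README gates `derive(Serialize)` on output structs such as `AffectedEntity` below. A hedged sketch of dumping a response as JSON once that feature is enabled; the event ARN is a placeholder and `serde_json` is an extra dependency:

```rust
// Cargo.toml (sketch):
//   rusoto_health = { version = "0.41", features = ["serialize_structs"] }
//   serde_json = "1.0"

use rusoto_core::Region;
use rusoto_health::{AWSHealth, AWSHealthClient, DescribeAffectedEntitiesRequest, EntityFilter};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AWSHealthClient::new(Region::UsEast1);
    let request = DescribeAffectedEntitiesRequest {
        filter: EntityFilter {
            event_arns: vec!["arn:aws:health:us-east-1::event/EXAMPLE".to_string()], // placeholder
            ..Default::default()
        },
        ..Default::default()
    };
    let response = client.describe_affected_entities(request).sync()?;
    // With serialize_structs enabled, output structs derive Serialize,
    // so responses can be persisted or logged as JSON.
    println!("{}", serde_json::to_string_pretty(&response)?);
    Ok(())
}
```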

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AffectedEntity { ///

The 12-digit AWS account number that contains the affected entity.

#[serde(rename = "awsAccountId")] @@ -94,7 +93,7 @@ pub struct DescribeAffectedEntitiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAffectedEntitiesResponse { ///

The entities that match the filter criteria.

#[serde(rename = "entities")] @@ -115,7 +114,7 @@ pub struct DescribeEntityAggregatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEntityAggregatesResponse { ///

The number of entities that are affected by each of the specified events.

#[serde(rename = "entityAggregates")] @@ -143,7 +142,7 @@ pub struct DescribeEventAggregatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventAggregatesResponse { ///

The number of events in each category that meet the optional filter criteria.

#[serde(rename = "eventAggregates")] @@ -167,7 +166,7 @@ pub struct DescribeEventDetailsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventDetailsResponse { ///

Error messages for any events that could not be retrieved.

#[serde(rename = "failedSet")] @@ -200,7 +199,7 @@ pub struct DescribeEventTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventTypesResponse { ///

A list of event types that match the filter criteria. Event types have a category (issue, accountNotification, or scheduledChange), a service (for example, EC2, RDS, DATAPIPELINE, BILLING), and a code (in the format AWS_SERVICE_DESCRIPTION ; for example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT).

#[serde(rename = "eventTypes")] @@ -233,7 +232,7 @@ pub struct DescribeEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventsResponse { ///

The events that match the specified filter criteria.

#[serde(rename = "events")] @@ -247,7 +246,7 @@ pub struct DescribeEventsResponse { ///

The number of entities that are affected by one or more events. Returned by the DescribeEntityAggregates operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntityAggregate { ///

The number entities that match the criteria for the specified events.

#[serde(rename = "count")] @@ -289,7 +288,7 @@ pub struct EntityFilter { ///

Summary information about an event, returned by the DescribeEvents operation. The DescribeEventDetails operation also returns this information, as well as the EventDescription and additional event metadata.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Event { ///

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

#[serde(rename = "arn")] @@ -335,7 +334,7 @@ pub struct Event { ///

The number of events of each issue type. Returned by the DescribeEventAggregates operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventAggregate { ///

The issue type for the associated count.

#[serde(rename = "aggregateValue")] @@ -349,7 +348,7 @@ pub struct EventAggregate { ///

Detailed information about an event. A combination of an Event object, an EventDescription object, and additional metadata about the event. Returned by the DescribeEventDetails operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventDetails { ///

Summary information about the event.

#[serde(rename = "event")] @@ -367,7 +366,7 @@ pub struct EventDetails { ///

Error information returned when a DescribeEventDetails operation cannot find a specified event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventDetailsErrorItem { ///

A message that describes the error.

#[serde(rename = "errorMessage")] @@ -729,10 +728,7 @@ impl AWSHealthClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> AWSHealthClient { - AWSHealthClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -746,10 +742,14 @@ impl AWSHealthClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - AWSHealthClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> AWSHealthClient { + AWSHealthClient { client, region } } } diff --git a/rusoto/services/health/src/lib.rs b/rusoto/services/health/src/lib.rs index bff6b9ae0ea..8648db49d49 100644 --- a/rusoto/services/health/src/lib.rs +++ b/rusoto/services/health/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

AWS Health

The AWS Health API provides programmatic access to the AWS Health information that is presented in the AWS Personal Health Dashboard. You can get information about events that affect your AWS resources:

In addition, these operations provide information about event types and summary counts of events or affected entities:

The Health API requires a Business or Enterprise support plan from AWS Support. Calling the Health API from an account that does not have a Business or Enterprise support plan causes a SubscriptionRequiredException.

For authentication of requests, AWS Health uses the Signature Version 4 Signing Process.

See the AWS Health User Guide for information about how to use the API.

Service Endpoint

The HTTP endpoint for the AWS Health API is:

  • https://health.us-east-1.amazonaws.com

+//!

AWS Health

The AWS Health API provides programmatic access to the AWS Health information that is presented in the AWS Personal Health Dashboard. You can get information about events that affect your AWS resources:

In addition, these operations provide information about event types and summary counts of events or affected entities:

The Health API requires a Business or Enterprise support plan from AWS Support. Calling the Health API from an account that does not have a Business or Enterprise support plan causes a SubscriptionRequiredException.

For authentication of requests, AWS Health uses the Signature Version 4 Signing Process.

See the AWS Health User Guide for information about how to use the API.

Service Endpoint

The HTTP endpoint for the AWS Health API is:

  • https://health.us-east-1.amazonaws.com
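Given the single us-east-1 endpoint and the support-plan requirement noted above, a minimal sketch of a first call through the generated client; assumes the 0.41-era `.sync()` adapter:

```rust
use rusoto_core::Region;
use rusoto_health::{AWSHealth, AWSHealthClient, DescribeEventsRequest};

fn main() {
    // The Health API is only served from us-east-1; accounts without a
    // Business or Enterprise support plan get a SubscriptionRequiredException.
    let client = AWSHealthClient::new(Region::UsEast1);
    match client.describe_events(DescribeEventsRequest::default()).sync() {
        Ok(resp) => println!("events: {:?}", resp.events),
        Err(e) => eprintln!("describe_events failed: {}", e),
    }
}
```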

//! //! If you're using the service, you're probably looking for [AWSHealthClient](struct.AWSHealthClient.html) and [AWSHealth](trait.AWSHealth.html). diff --git a/rusoto/services/iam/Cargo.toml b/rusoto/services/iam/Cargo.toml index 33dec79b651..5bb3f4be091 100644 --- a/rusoto/services/iam/Cargo.toml +++ b/rusoto/services/iam/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_iam" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/iam/README.md b/rusoto/services/iam/README.md index 889677ce163..f070a52f677 100644 --- a/rusoto/services/iam/README.md +++ b/rusoto/services/iam/README.md @@ -23,9 +23,16 @@ To use `rusoto_iam` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_iam = "0.40.0" +rusoto_iam = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/iam/src/custom/mod.rs b/rusoto/services/iam/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/iam/src/custom/mod.rs +++ b/rusoto/services/iam/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/iam/src/generated.rs b/rusoto/services/iam/src/generated.rs index 36e235e7385..4185fcb8718 100644 --- a/rusoto/services/iam/src/generated.rs +++ b/rusoto/services/iam/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -34,6 +33,84 @@ use std::str::FromStr; use xml::reader::ParserConfig; use xml::EventReader; +///

An object that contains details about when a principal in the reported AWS Organizations entity last attempted to access an AWS service. A principal can be an IAM user, an IAM role, or the AWS account root user within the reported Organizations entity.

This data type is a response element in the GetOrganizationsAccessReport operation.

+#[derive(Default, Debug, Clone, PartialEq)] +pub struct AccessDetail { + ///

The path of the Organizations entity (root, organizational unit, or account) from which an authenticated principal last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no principals (IAM users, IAM roles, or root users) in the reported Organizations entity attempted to access the service within the reporting period.

+ pub entity_path: Option, + ///

The date and time, in ISO 8601 date-time format, when an authenticated principal most recently attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no principals in the reported Organizations entity attempted to access the service within the reporting period.

+ pub last_authenticated_time: Option, + ///

The Region where the last service access attempt occurred.

This field is null if no principals in the reported Organizations entity attempted to access the service within the reporting period.

+ pub region: Option, + ///

The name of the service in which access was attempted.

+ pub service_name: String, + ///

The namespace of the service in which access was attempted.

To learn the service namespace of a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

+ pub service_namespace: String, + ///

The number of accounts with authenticated principals (root users, IAM users, and IAM roles) that attempted to access the service in the reporting period.

+ pub total_authenticated_entities: Option, +} + +struct AccessDetailDeserializer; +impl AccessDetailDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, AccessDetail, _>(tag_name, stack, |name, stack, obj| { + match name { + "EntityPath" => { + obj.entity_path = Some(OrganizationsEntityPathTypeDeserializer::deserialize( + "EntityPath", + stack, + )?); + } + "LastAuthenticatedTime" => { + obj.last_authenticated_time = Some(DateTypeDeserializer::deserialize( + "LastAuthenticatedTime", + stack, + )?); + } + "Region" => { + obj.region = Some(StringTypeDeserializer::deserialize("Region", stack)?); + } + "ServiceName" => { + obj.service_name = + ServiceNameTypeDeserializer::deserialize("ServiceName", stack)?; + } + "ServiceNamespace" => { + obj.service_namespace = + ServiceNamespaceTypeDeserializer::deserialize("ServiceNamespace", stack)?; + } + "TotalAuthenticatedEntities" => { + obj.total_authenticated_entities = Some(IntegerTypeDeserializer::deserialize( + "TotalAuthenticatedEntities", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }) + } +} +struct AccessDetailsDeserializer; +impl AccessDetailsDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result, XmlParseError> { + deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { + if name == "member" { + obj.push(AccessDetailDeserializer::deserialize("member", stack)?); + } else { + skip_tree(stack); + } + Ok(()) + }) + } +} ///

Contains information about an AWS access key.

This data type is used as a response element in the CreateAccessKey and ListAccessKeys operations.

The SecretAccessKey value is returned only in response to CreateAccessKey. You can get a secret access key only when you first create an access key; you cannot recover the secret access key later. If you lose a secret access key, you must create a new access key.

#[derive(Default, Debug, Clone, PartialEq)] pub struct AccessKey { @@ -95,11 +172,11 @@ impl AccessKeyIdTypeDeserializer { ///

Contains information about the last time an AWS access key was used since IAM began tracking this information on April 22, 2015.

This data type is used as a response element in the GetAccessKeyLastUsed operation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct AccessKeyLastUsed { - ///

The date and time, in ISO 8601 date-time format, when the access key was most recently used. This field is null in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM began tracking this information.

  • There is no sign-in data associated with the user

+ ///

The date and time, in ISO 8601 date-time format, when the access key was most recently used. This field is null in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM began tracking this information.

  • There is no sign-in data associated with the user.

pub last_used_date: String, - ///

The AWS region where this access key was most recently used. The value for this field is "N/A" in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM began tracking this information.

  • There is no sign-in data associated with the user

For more information about AWS regions, see Regions and Endpoints in the Amazon Web Services General Reference.

+ ///

The AWS Region where this access key was most recently used. The value for this field is "N/A" in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM began tracking this information.

  • There is no sign-in data associated with the user.

For more information about AWS Regions, see Regions and Endpoints in the Amazon Web Services General Reference.
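Note that "N/A" comes back as a literal string rather than a null, so callers should test for it explicitly. A sketch under that assumption, with a placeholder access key ID:

```rust
use rusoto_core::Region;
use rusoto_iam::{GetAccessKeyLastUsedRequest, Iam, IamClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = IamClient::new(Region::UsEast1);
    let response = client
        .get_access_key_last_used(GetAccessKeyLastUsedRequest {
            access_key_id: "AKIAIOSFODNN7EXAMPLE".to_string(), // placeholder key ID
        })
        .sync()?;
    if let Some(last_used) = response.access_key_last_used {
        // region and service_name are the literal string "N/A" when the key
        // has never been used (or predates tracking), so check before use.
        if last_used.region != "N/A" {
            println!("last used in {} by {}", last_used.region, last_used.service_name);
        }
    }
    Ok(())
}
```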

pub region: String, - ///

The name of the AWS service with which this access key was most recently used. The value of this field is "N/A" in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM started tracking this information.

  • There is no sign-in data associated with the user

+ ///

The name of the AWS service with which this access key was most recently used. The value of this field is "N/A" in the following situations:

  • The user does not have an access key.

  • An access key exists but has not been used since IAM started tracking this information.

  • There is no sign-in data associated with the user.

pub service_name: String, } @@ -677,7 +754,7 @@ impl ColumnNumberDeserializer { Ok(obj) } } -///

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulateCustomPolicy .

+///

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy .

#[derive(Default, Debug, Clone, PartialEq)] pub struct ContextEntry { ///

The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId.

@@ -1192,7 +1269,7 @@ impl CreatePolicyVersionResponseDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateRoleRequest { - ///

The trust relationship policy document that grants an entity permission to assume the role.

in IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF)

  • The special characters tab (\u0009), line feed (\u000A), and carriage return (\u000D)

Upon success, the response includes the same trust policy as a URL-encoded JSON string.

+ ///

The trust relationship policy document that grants an entity permission to assume the role.

In IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF)

  • The special characters tab (\u0009), line feed (\u000A), and carriage return (\u000D)

Upon success, the response includes the same trust policy in JSON format.
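A hedged sketch of supplying the trust policy as a JSON string when creating a role, assuming the `.sync()` adapter; the role name and principal are placeholders:

```rust
use rusoto_core::Region;
use rusoto_iam::{CreateRoleRequest, Iam, IamClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The trust policy is passed to IAM as a JSON string; the YAML form is
    // only accepted via AWS CloudFormation templates.
    let trust_policy = r#"{
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": { "Service": "ec2.amazonaws.com" },
            "Action": "sts:AssumeRole"
        }]
    }"#;

    let client = IamClient::new(Region::UsEast1);
    let response = client
        .create_role(CreateRoleRequest {
            role_name: "example-role".to_string(), // placeholder
            assume_role_policy_document: trust_policy.to_string(),
            ..Default::default()
        })
        .sync()?;
    println!("created role: {}", response.role.arn);
    Ok(())
}
```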

pub assume_role_policy_document: String, ///

A description of the role.

pub description: Option, @@ -2092,7 +2169,7 @@ impl DeleteVirtualMFADeviceRequestSerializer { pub struct DeletionTaskFailureReasonType { ///

A short description of the reason that the service-linked role deletion failed.

pub reason: Option, - ///

A list of objects that contains details about the service-linked role deletion failure, if that information is returned by the service. If the service-linked role has active sessions or if any resources that were used by the role have not been deleted from the linked service, the role can't be deleted. This parameter includes a list of the resources that are associated with the role and the region in which the resources are being used.

+ ///

A list of objects that contains details about the service-linked role deletion failure, if that information is returned by the service. If the service-linked role has active sessions or if any resources that were used by the role have not been deleted from the linked service, the role can't be deleted. This parameter includes a list of the resources that are associated with the role and the Region in which the resources are being used.

pub role_usage_list: Option>, } @@ -2362,7 +2439,7 @@ impl EntityNameTypeDeserializer { Ok(obj) } } -///

Contains information about the reason that the operation failed.

This data type is used as a response element in the GetServiceLastAccessedDetails operation and the GetServiceLastAccessedDetailsWithEntities operation.

+///

Contains information about the reason that the operation failed.

This data type is used as a response element in the GetOrganizationsAccessReport, GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities operations.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ErrorDetails { ///

The error code associated with the operation failure.

@@ -2437,11 +2514,11 @@ pub struct EvaluationResult { pub eval_decision_details: Option<::std::collections::HashMap>, ///

The ARN of the resource that the indicated API operation was tested on.

pub eval_resource_name: Option, - ///

A list of the statements in the input policies that determine the result for this scenario. Remember that even if multiple statements allow the operation on the resource, if only one statement denies that operation, then the explicit deny overrides any allow. Inaddition, the deny statement is the only entry included in the result.

+ ///

A list of the statements in the input policies that determine the result for this scenario. Remember that even if multiple statements allow the operation on the resource, if only one statement denies that operation, then the explicit deny overrides any allow. In addition, the deny statement is the only entry included in the result.

pub matched_statements: Option>, ///

A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when the resource in a simulation is "*", either explicitly, or when the ResourceArns parameter is blank. If you include a list of resources, then any missing context values are instead included under the ResourceSpecificResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

pub missing_context_values: Option>, - ///

A structure that details how AWS Organizations and its service control policies affect the results of the simulation. Only applies if the simulated user's account is part of an organization.

+ ///

A structure that details how Organizations and its service control policies affect the results of the simulation. Only applies if the simulated user's account is part of an organization.

pub organizations_decision_detail: Option, ///

The individual results of the simulation of the API operation specified in EvalActionName on each resource.

pub resource_specific_results: Option>, @@ -2580,6 +2657,61 @@ impl GenerateCredentialReportResponseDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] +pub struct GenerateOrganizationsAccessReportRequest { + ///

The path of the AWS Organizations entity (root, OU, or account). You can build an entity path using the known structure of your organization. For example, assume that your account ID is 123456789012 and its parent OU ID is ou-rge0-awsabcde. The organization root ID is r-f6g7h8i9j0example and your organization ID is o-a1b2c3d4e5. Your entity path is o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-rge0-awsabcde/123456789012.

+ pub entity_path: String, + ///

The identifier of the AWS Organizations service control policy (SCP). This parameter is optional.

This ID is used to generate information about when an account principal that is limited by the SCP attempted to access an AWS service.

+ pub organizations_policy_id: Option, +} + +/// Serialize `GenerateOrganizationsAccessReportRequest` contents to a `SignedRequest`. +struct GenerateOrganizationsAccessReportRequestSerializer; +impl GenerateOrganizationsAccessReportRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &GenerateOrganizationsAccessReportRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put(&format!("{}{}", prefix, "EntityPath"), &obj.entity_path); + if let Some(ref field_value) = obj.organizations_policy_id { + params.put( + &format!("{}{}", prefix, "OrganizationsPolicyId"), + &field_value, + ); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct GenerateOrganizationsAccessReportResponse { + ///

The job identifier that you can use in the GetOrganizationsAccessReport operation.

+ pub job_id: Option, +} + +struct GenerateOrganizationsAccessReportResponseDeserializer; +impl GenerateOrganizationsAccessReportResponseDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, GenerateOrganizationsAccessReportResponse, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "JobId" => { + obj.job_id = Some(JobIDTypeDeserializer::deserialize("JobId", stack)?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] pub struct GenerateServiceLastAccessedDetailsRequest { ///

The ARN of the IAM resource (user, group, role, or managed policy) used to generate information about when the resource was last used in an attempt to access an AWS service.

pub arn: String, @@ -3286,6 +3418,126 @@ impl GetOpenIDConnectProviderResponseDeserializer { } } #[derive(Default, Debug, Clone, PartialEq)] +pub struct GetOrganizationsAccessReportRequest { + ///

The identifier of the request generated by the GenerateOrganizationsAccessReport operation.

+ pub job_id: String, + ///

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

+ pub marker: Option, + ///

Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

If you do not include this parameter, the number of items defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.
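Taken together with GenerateOrganizationsAccessReport further down, the JobId, Marker, and IsTruncated fields support a generate-poll-paginate loop. A sketch under those assumptions; the entity path is the documentation's example value and the two-second poll interval is arbitrary:

```rust
use std::{thread, time::Duration};

use rusoto_core::Region;
use rusoto_iam::{
    GenerateOrganizationsAccessReportRequest, GetOrganizationsAccessReportRequest, Iam, IamClient,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = IamClient::new(Region::UsEast1);

    // Kick off report generation for a hypothetical Organizations entity path.
    let job = client
        .generate_organizations_access_report(GenerateOrganizationsAccessReportRequest {
            entity_path: "o-a1b2c3d4e5/r-f6g7h8i9j0example/123456789012".to_string(),
            ..Default::default()
        })
        .sync()?;
    let job_id = job.job_id.expect("service returns a job ID on success");

    // Poll until the job leaves IN_PROGRESS, then page via Marker/IsTruncated.
    let mut marker: Option<String> = None;
    loop {
        let page = client
            .get_organizations_access_report(GetOrganizationsAccessReportRequest {
                job_id: job_id.clone(),
                marker: marker.clone(),
                ..Default::default()
            })
            .sync()?;
        if page.job_status == "IN_PROGRESS" {
            thread::sleep(Duration::from_secs(2));
            continue;
        }
        for detail in page.access_details.unwrap_or_default() {
            println!("{}: {:?}", detail.service_name, detail.last_authenticated_time);
        }
        if page.is_truncated.unwrap_or(false) {
            marker = page.marker;
        } else {
            break;
        }
    }
    Ok(())
}
```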

+ pub max_items: Option, + ///

The key that is used to sort the results. If you choose the namespace key, the results are returned in alphabetical order. If you choose the time key, the results are sorted numerically by the date and time.

+ pub sort_key: Option, +} + +/// Serialize `GetOrganizationsAccessReportRequest` contents to a `SignedRequest`. +struct GetOrganizationsAccessReportRequestSerializer; +impl GetOrganizationsAccessReportRequestSerializer { + fn serialize(params: &mut Params, name: &str, obj: &GetOrganizationsAccessReportRequest) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + params.put(&format!("{}{}", prefix, "JobId"), &obj.job_id); + if let Some(ref field_value) = obj.marker { + params.put(&format!("{}{}", prefix, "Marker"), &field_value); + } + if let Some(ref field_value) = obj.max_items { + params.put(&format!("{}{}", prefix, "MaxItems"), &field_value); + } + if let Some(ref field_value) = obj.sort_key { + params.put(&format!("{}{}", prefix, "SortKey"), &field_value); + } + } +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct GetOrganizationsAccessReportResponse { + ///

An object that contains details about the most recent attempt to access the service.

+ pub access_details: Option>, + pub error_details: Option, + ///

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

+ pub is_truncated: Option, + ///

The date and time, in ISO 8601 date-time format, when the generated report job was completed or failed.

This field is null if the job is still in progress, as indicated by a job status value of IN_PROGRESS.

+ pub job_completion_date: Option, + ///

The date and time, in ISO 8601 date-time format, when the report job was created.

+ pub job_creation_date: String, + ///

The status of the job.

+ pub job_status: String, + ///

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

+ pub marker: Option, + ///

The number of services that the applicable SCPs allow account principals to access.

+ pub number_of_services_accessible: Option, + ///

The number of services that account principals are allowed but did not attempt to access.

+ pub number_of_services_not_accessed: Option, +} + +struct GetOrganizationsAccessReportResponseDeserializer; +impl GetOrganizationsAccessReportResponseDeserializer { + #[allow(unused_variables)] + fn deserialize( + tag_name: &str, + stack: &mut T, + ) -> Result { + deserialize_elements::<_, GetOrganizationsAccessReportResponse, _>( + tag_name, + stack, + |name, stack, obj| { + match name { + "AccessDetails" => { + obj.access_details.get_or_insert(vec![]).extend( + AccessDetailsDeserializer::deserialize("AccessDetails", stack)?, + ); + } + "ErrorDetails" => { + obj.error_details = Some(ErrorDetailsDeserializer::deserialize( + "ErrorDetails", + stack, + )?); + } + "IsTruncated" => { + obj.is_truncated = + Some(BooleanTypeDeserializer::deserialize("IsTruncated", stack)?); + } + "JobCompletionDate" => { + obj.job_completion_date = Some(DateTypeDeserializer::deserialize( + "JobCompletionDate", + stack, + )?); + } + "JobCreationDate" => { + obj.job_creation_date = + DateTypeDeserializer::deserialize("JobCreationDate", stack)?; + } + "JobStatus" => { + obj.job_status = + JobStatusTypeDeserializer::deserialize("JobStatus", stack)?; + } + "Marker" => { + obj.marker = Some(MarkerTypeDeserializer::deserialize("Marker", stack)?); + } + "NumberOfServicesAccessible" => { + obj.number_of_services_accessible = + Some(IntegerTypeDeserializer::deserialize( + "NumberOfServicesAccessible", + stack, + )?); + } + "NumberOfServicesNotAccessed" => { + obj.number_of_services_not_accessed = + Some(IntegerTypeDeserializer::deserialize( + "NumberOfServicesNotAccessed", + stack, + )?); + } + _ => skip_tree(stack), + } + Ok(()) + }, + ) + } +} +#[derive(Default, Debug, Clone, PartialEq)] pub struct GetPolicyRequest { ///

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

pub policy_arn: String, @@ -3699,7 +3951,7 @@ pub struct GetServiceLastAccessedDetailsResponse { pub error: Option, ///

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

pub is_truncated: Option, - ///

The date and time, in ISO 8601 date-time format, when the generated report job was completed or failed.

This field is null if the job is still in progress, as indicated by a JobStatus value of IN_PROGRESS.

+ ///

The date and time, in ISO 8601 date-time format, when the generated report job was completed or failed.

This field is null if the job is still in progress, as indicated by a job status value of IN_PROGRESS.

pub job_completion_date: String, ///

The date and time, in ISO 8601 date-time format, when the report job was created.

pub job_creation_date: String, @@ -3809,7 +4061,7 @@ pub struct GetServiceLastAccessedDetailsWithEntitiesResponse { pub error: Option, ///

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

pub is_truncated: Option, - ///

The date and time, in ISO 8601 date-time format, when the generated report job was completed or failed.

+ ///

The date and time, in ISO 8601 date-time format, when the generated report job was completed or failed.

This field is null if the job is still in progress, as indicated by a job status value of IN_PROGRESS.

pub job_completion_date: String, ///

The date and time, in ISO 8601 date-time format, when the report job was created.

pub job_creation_date: String, @@ -6649,6 +6901,17 @@ impl ManagedPolicyDetailListTypeDeserializer { }) } } +struct MarkerTypeDeserializer; +impl MarkerTypeDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} struct MaxPasswordAgeTypeDeserializer; impl MaxPasswordAgeTypeDeserializer { #[allow(unused_variables)] @@ -6746,10 +7009,10 @@ impl OpenIDConnectProviderUrlTypeDeserializer { Ok(obj) } } -///

Contains information about AWS Organizations's effect on a policy simulation.

+///

Contains information about the effect that Organizations has on a policy simulation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct OrganizationsDecisionDetail { - ///

Specifies whether the simulated operation is allowed by the AWS Organizations service control policies that impact the simulated user's account.

+ ///

Specifies whether the simulated operation is allowed by the Organizations service control policies that impact the simulated user's account.

pub allowed_by_organizations: Option, } @@ -6778,6 +7041,17 @@ impl OrganizationsDecisionDetailDeserializer { ) } } +struct OrganizationsEntityPathTypeDeserializer; +impl OrganizationsEntityPathTypeDeserializer { + #[allow(unused_variables)] + fn deserialize(tag_name: &str, stack: &mut T) -> Result { + start_element(tag_name, stack)?; + let obj = characters(stack)?; + end_element(tag_name, stack)?; + + Ok(obj) + } +} ///

Contains information about the account password policy.

This data type is used as a response element in the GetAccountPasswordPolicy operation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct PasswordPolicy { @@ -7544,7 +7818,7 @@ impl PublicKeyMaterialTypeDeserializer { } #[derive(Default, Debug, Clone, PartialEq)] pub struct PutGroupPolicyRequest { - ///

The name of the group to associate the policy with.

&regex-name;.

+ ///

The name of the group to associate the policy with.

This parameter allows (through its regex pattern) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-.

pub group_name: String, ///

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF)

  • The special characters tab (\u0009), line feed (\u000A), and carriage return (\u000D)

pub policy_document: String, @@ -8295,7 +8569,7 @@ impl RoleUsageListTypeDeserializer { ///

An object that contains details about how a service-linked role is used, if that information is returned by the service.

This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus operation.

#[derive(Default, Debug, Clone, PartialEq)] pub struct RoleUsageType { - ///

The name of the region where the service-linked role is being used.

+ ///

The name of the Region where the service-linked role is being used.

pub region: Option, ///

The name of the resource that is using the service-linked role.

pub resources: Option>, @@ -8658,7 +8932,7 @@ pub struct ServiceLastAccessed { pub service_name: String, ///

The namespace of the service in which access was attempted.

To learn the service namespace of a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

pub service_namespace: String, - ///

The total number of authenticated entities that have attempted to access the service.

This field is null if no IAM entities attempted to access the service within the reporting period.

+ ///

The total number of authenticated principals (root user, IAM users, or IAM roles) that have attempted to access the service.

This field is null if no principals attempted to access the service within the reporting period.

pub total_authenticated_entities: Option, } @@ -9166,7 +9440,7 @@ pub struct SimulatePrincipalPolicyRequest { pub action_names: Vec, ///

The ARN of the IAM user that you want to specify as the simulated caller of the API operations. If you do not specify a CallerArn, it defaults to the ARN of the user that you specify in PolicySourceArn, if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result is that you simulate calling the API operations as Bob, as if Bob had David's policies.

You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

pub caller_arn: Option, - ///

A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.

+ ///

A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permissions policies, the corresponding value is supplied.
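A sketch of supplying such a context entry to SimulatePrincipalPolicy, assuming the `.sync()` adapter; the user ARN, action, and IP value are placeholders:

```rust
use rusoto_core::Region;
use rusoto_iam::{ContextEntry, Iam, IamClient, SimulatePrincipalPolicyRequest};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = IamClient::new(Region::UsEast1);
    // Supply a value for aws:SourceIp so Condition elements in the simulated
    // policies have something to evaluate against.
    let request = SimulatePrincipalPolicyRequest {
        policy_source_arn: "arn:aws:iam::123456789012:user/David".to_string(), // placeholder
        action_names: vec!["s3:ListBucket".to_string()],
        context_entries: Some(vec![ContextEntry {
            context_key_name: Some("aws:SourceIp".to_string()),
            context_key_type: Some("ip".to_string()),
            context_key_values: Some(vec!["203.0.113.0".to_string()]),
        }]),
        ..Default::default()
    };
    let response = client.simulate_principal_policy(request).sync()?;
    for result in response.evaluation_results.unwrap_or_default() {
        println!("{} -> {}", result.eval_action_name, result.eval_decision);
    }
    Ok(())
}
```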

pub context_entries: Option>, ///

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

pub marker: Option, @@ -10357,7 +10631,7 @@ pub struct User { pub arn: String, ///

The date and time, in ISO 8601 date-time format, when the user was created.

pub create_date: String, - ///

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

  • The user never had a password.

  • A password exists but has not been used since IAM started tracking this information on October 20, 2014.

A null valuedoes not mean that the user never had a password. Also, if the user does not currently have a password, but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

+ ///

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

  • The user never had a password.

  • A password exists but has not been used since IAM started tracking this information on October 20, 2014.

A null value does not mean that the user never had a password. Also, if the user does not currently have a password, but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

pub password_last_used: Option, ///

The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

pub path: String, @@ -14188,6 +14462,59 @@ impl Error for GenerateCredentialReportError { } } } +/// Errors returned by GenerateOrganizationsAccessReport +#[derive(Debug, PartialEq)] +pub enum GenerateOrganizationsAccessReportError { + ///

The request failed because the maximum number of concurrent requests for this account has already been reached.

+ ReportGenerationLimitExceeded(String), +} + +impl GenerateOrganizationsAccessReportError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "ReportGenerationLimitExceeded" => { + return RusotoError::Service( + GenerateOrganizationsAccessReportError::ReportGenerationLimitExceeded( + parsed_error.message, + ), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for GenerateOrganizationsAccessReportError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GenerateOrganizationsAccessReportError { + fn description(&self) -> &str { + match *self { + GenerateOrganizationsAccessReportError::ReportGenerationLimitExceeded(ref cause) => { + cause + } + } + } +} /// Errors returned by GenerateServiceLastAccessedDetails #[derive(Debug, PartialEq)] pub enum GenerateServiceLastAccessedDetailsError { @@ -14251,10 +14578,7 @@ impl Error for GenerateServiceLastAccessedDetailsError { } /// Errors returned by GetAccessKeyLastUsed #[derive(Debug, PartialEq)] -pub enum GetAccessKeyLastUsedError { - ///

The request was rejected because it referenced a resource entity that does not exist. The error message describes the resource.

- NoSuchEntity(String), -} +pub enum GetAccessKeyLastUsedError {} impl GetAccessKeyLastUsedError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { @@ -14264,11 +14588,6 @@ impl GetAccessKeyLastUsedError { find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { - "NoSuchEntity" => { - return RusotoError::Service(GetAccessKeyLastUsedError::NoSuchEntity( - parsed_error.message, - )) - } _ => {} } } @@ -14291,9 +14610,7 @@ impl fmt::Display for GetAccessKeyLastUsedError { } impl Error for GetAccessKeyLastUsedError { fn description(&self) -> &str { - match *self { - GetAccessKeyLastUsedError::NoSuchEntity(ref cause) => cause, - } + match *self {} } } /// Errors returned by GetAccountAuthorizationDetails @@ -14917,6 +15234,55 @@ impl Error for GetOpenIDConnectProviderError { } } } +/// Errors returned by GetOrganizationsAccessReport +#[derive(Debug, PartialEq)] +pub enum GetOrganizationsAccessReportError { + ///

The request was rejected because it referenced a resource entity that does not exist. The error message describes the resource.

+ NoSuchEntity(String), +} + +impl GetOrganizationsAccessReportError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + { + let reader = EventReader::new(res.body.as_ref()); + let mut stack = XmlResponse::new(reader.into_iter().peekable()); + find_start_element(&mut stack); + if let Ok(parsed_error) = Self::deserialize(&mut stack) { + match &parsed_error.code[..] { + "NoSuchEntity" => { + return RusotoError::Service( + GetOrganizationsAccessReportError::NoSuchEntity(parsed_error.message), + ) + } + _ => {} + } + } + } + RusotoError::Unknown(res) + } + + fn deserialize(stack: &mut T) -> Result + where + T: Peek + Next, + { + start_element("ErrorResponse", stack)?; + XmlErrorDeserializer::deserialize("Error", stack) + } +} +impl fmt::Display for GetOrganizationsAccessReportError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for GetOrganizationsAccessReportError { + fn description(&self) -> &str { + match *self { + GetOrganizationsAccessReportError::NoSuchEntity(ref cause) => cause, + } + } +} /// Errors returned by GetPolicy #[derive(Debug, PartialEq)] pub enum GetPolicyError { @@ -19866,7 +20232,16 @@ pub trait Iam { &self, ) -> RusotoFuture; - ///

Generates a request for a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

+ ///

Generates a report for service last accessed data for AWS Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

To call this operation, you must be signed in using your AWS Organizations master account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and AWS Organizations permissions. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

You can generate a service last accessed data report for a policy by specifying an entity's path and an optional AWS Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

The data includes all attempts to access AWS, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

This operation returns a JobId. To check the status of the report generation, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

To generate a service last accessed data report for entities, specify an entity path without specifying the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned in the report.

  • Root – When you specify the organization root as the entity, the resulting report lists all of the services allowed by SCPs that are attached to your root. For each service, the report includes data for all accounts in your organization except the master account, because the master account is not limited by SCPs.

  • OU – When you specify an organizational unit (OU) as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the OU and its parents. For each service, the report includes data for all accounts in the OU or its children. This data excludes the master account, because the master account is not limited by SCPs.

  • Master account – When you specify the master account, the resulting report lists all AWS services, because the master account is not limited by SCPs. For each service, the report includes data for only the master account.

  • Account – When you specify another account as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the account and its parents. For each service, the report includes data for only the specified account.

To generate a service last accessed data report for policies, specify an entity path and the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned for each service.

  • Root – When you specify the root entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in your organization to which the SCP applies. This data excludes the master account, because the master account is not limited by SCPs. If the SCP is not attached to any entities in the organization, then the report will return a list of services with no data.

  • OU – When you specify an OU entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in the OU or its children to which the SCP applies. This means that other accounts outside the OU that are affected by the SCP might not be included in the data. This data excludes the master account, because the master account is not limited by SCPs. If the SCP is not attached to the OU or one of its children, the report will return a list of services with no data.

  • Master account – When you specify the master account, the resulting report lists all AWS services, because the master account is not limited by SCPs. If you specify a policy ID in the CLI or API, the policy is ignored. For each service, the report includes data for only the master account.

  • Account – When you specify another account entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for only the specified account. This means that other accounts in the organization that are affected by the SCP might not be included in the data. If the SCP is not attached to the account, the report will return a list of services with no data.

Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

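For review context, a minimal sketch (not part of this diff) of driving the new trait method synchronously. The request fields `entity_path` and `organizations_policy_id` are assumed from the generated AWS shape, and the entity path below is a placeholder:

use rusoto_core::Region;
use rusoto_iam::{GenerateOrganizationsAccessReportRequest, Iam, IamClient};

fn main() {
    // Requires AWS Organizations master account credentials.
    let client = IamClient::new(Region::UsEast1);
    let request = GenerateOrganizationsAccessReportRequest {
        // Placeholder path for the root, OU, or account to report on.
        entity_path: "o-a1b2c3d4e5/r-f6g7h8i9j0".to_owned(),
        // None generates the entity report rather than the policy report.
        organizations_policy_id: None,
    };
    match client.generate_organizations_access_report(request).sync() {
        // Keep the JobId; GetOrganizationsAccessReport needs it for polling.
        Ok(response) => println!("report job: {:?}", response.job_id),
        Err(e) => eprintln!("could not start report: {}", e),
    }
}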
+    fn generate_organizations_access_report(
+        &self,
+        input: GenerateOrganizationsAccessReportRequest,
+    ) -> RusotoFuture<
+        GenerateOrganizationsAccessReportResponse,
+        GenerateOrganizationsAccessReportError,
+    >;
+
+    ///

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

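A similar sketch (assumed field names, placeholder ARN) for the per-resource variant; the returned JobId feeds GetServiceLastAccessedDetails:

use rusoto_core::Region;
use rusoto_iam::{GenerateServiceLastAccessedDetailsRequest, Iam, IamClient};

fn main() {
    let client = IamClient::new(Region::UsEast1);
    // ARN of the user, group, role, or policy to report on (placeholder).
    let request = GenerateServiceLastAccessedDetailsRequest {
        arn: "arn:aws:iam::123456789012:user/ExampleUser".to_owned(),
    };
    match client.generate_service_last_accessed_details(request).sync() {
        Ok(response) => println!("job id: {:?}", response.job_id),
        Err(e) => eprintln!("error: {}", e),
    }
}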
     fn generate_service_last_accessed_details(
         &self,
         input: GenerateServiceLastAccessedDetailsRequest,
@@ -19941,6 +20316,12 @@ pub trait Iam {
         input: GetOpenIDConnectProviderRequest,
     ) -> RusotoFuture<GetOpenIDConnectProviderResponse, GetOpenIDConnectProviderError>;
 
+    ///

Retrieves the service last accessed data report for AWS Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

To call this operation, you must be signed in to the master account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

For each service that principals in an account (root users, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

By default, the list is sorted by service namespace.

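A hedged sketch of polling the report, assuming the generated request struct derives Default and the response exposes `job_status` and `access_details`; the job id is a placeholder:

use rusoto_core::Region;
use rusoto_iam::{GetOrganizationsAccessReportRequest, Iam, IamClient};

fn main() {
    let client = IamClient::new(Region::UsEast1);
    let request = GetOrganizationsAccessReportRequest {
        // JobId returned by GenerateOrganizationsAccessReport (placeholder).
        job_id: "examplejob-1234".to_owned(),
        // marker, max_items, and sort_key stay at their defaults.
        ..Default::default()
    };
    match client.get_organizations_access_report(request).sync() {
        Ok(report) => {
            // Re-poll until the status is COMPLETED, then read the details.
            println!("status: {}", report.job_status);
            for detail in report.access_details.unwrap_or_default() {
                println!("{:?}", detail);
            }
        }
        Err(e) => eprintln!("error: {}", e),
    }
}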
+    fn get_organizations_access_report(
+        &self,
+        input: GetOrganizationsAccessReportRequest,
+    ) -> RusotoFuture<GetOrganizationsAccessReportResponse, GetOrganizationsAccessReportError>;
+
     ///

Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API. This API returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

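A short sketch of the metadata-only lookup described above, using a well-known managed policy ARN:

use rusoto_core::Region;
use rusoto_iam::{GetPolicyRequest, Iam, IamClient};

fn main() {
    let client = IamClient::new(Region::UsEast1);
    let request = GetPolicyRequest {
        policy_arn: "arn:aws:iam::aws:policy/ReadOnlyAccess".to_owned(),
    };
    // Metadata only; GetPolicyVersion returns the actual policy document.
    match client.get_policy(request).sync() {
        Ok(response) => println!("{:?}", response.policy),
        Err(e) => eprintln!("error: {}", e),
    }
}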
     fn get_policy(
         &self,
@@ -19980,7 +20361,7 @@ pub trait Iam {
         input: GetServerCertificateRequest,
     ) -> RusotoFuture<GetServerCertificateResponse, GetServerCertificateError>;
 
-    ///

After you generate a user, group, role, or policy report using the GenerateServiceLastAccessedDetails operation, you can use the JobId parameter in GetServiceLastAccessedDetails. This operation retrieves the status of your report job and a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

+ ///

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

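To round out the flow, a sketch of retrieving the per-resource report (field names assumed from the AWS shape; the job id is a placeholder):

use rusoto_core::Region;
use rusoto_iam::{GetServiceLastAccessedDetailsRequest, Iam, IamClient};

fn main() {
    let client = IamClient::new(Region::UsEast1);
    let request = GetServiceLastAccessedDetailsRequest {
        // JobId returned by GenerateServiceLastAccessedDetails (placeholder).
        job_id: "examplejob-5678".to_owned(),
        ..Default::default()
    };
    match client.get_service_last_accessed_details(request).sync() {
        Ok(details) => {
            println!("status: {}", details.job_status);
            // One entry per service, sorted by service namespace by default.
            for service in details.services_last_accessed {
                println!("{} -> {:?}", service.service_name, service.last_authenticated);
            }
        }
        Err(e) => eprintln!("error: {}", e),
    }
}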
fn get_service_last_accessed_details( &self, input: GetServiceLastAccessedDetailsRequest, @@ -20382,10 +20763,7 @@ impl IamClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> IamClient { - IamClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -20399,10 +20777,14 @@ impl IamClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - IamClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> IamClient { + IamClient { client, region } } } @@ -20631,7 +21013,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20710,7 +21092,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20758,7 +21140,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20809,7 +21191,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20857,7 +21239,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20908,7 +21290,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20958,7 +21340,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21009,7 +21391,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21060,7 +21442,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21108,7 +21490,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21159,7 +21541,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21210,7 +21592,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21258,7 +21640,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21753,7 +22135,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22067,7 +22449,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22086,7 +22468,60 @@ impl Iam for IamClient { }) } - ///

Generates a request for a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

+ ///

Generates a report for service last accessed data for AWS Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

To call this operation, you must be signed in using your AWS Organizations master account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and AWS Organizations permissions. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

You can generate a service last accessed data report for a policy by specifying an entity's path and an optional AWS Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

The data includes all attempts to access AWS, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

This operation returns a JobId. To check the status of the report generation, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

To generate a service last accessed data report for entities, specify an entity path without specifying the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned in the report.

  • Root – When you specify the organization root as the entity, the resulting report lists all of the services allowed by SCPs that are attached to your root. For each service, the report includes data for all accounts in your organization except the master account, because the master account is not limited by SCPs.

  • OU – When you specify an organizational unit (OU) as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the OU and its parents. For each service, the report includes data for all accounts in the OU or its children. This data excludes the master account, because the master account is not limited by SCPs.

  • Master account – When you specify the master account, the resulting report lists all AWS services, because the master account is not limited by SCPs. For each service, the report includes data for only the master account.

  • Account – When you specify another account as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the account and its parents. For each service, the report includes data for only the specified account.

To generate a service last accessed data report for policies, specify an entity path and the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned for each service.

  • Root – When you specify the root entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in your organization to which the SCP applies. This data excludes the master account, because the master account is not limited by SCPs. If the SCP is not attached to any entities in the organization, then the report will return a list of services with no data.

  • OU – When you specify an OU entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in the OU or its children to which the SCP applies. This means that other accounts outside the OU that are affected by the SCP might not be included in the data. This data excludes the master account, because the master account is not limited by SCPs. If the SCP is not attached to the OU or one of its children, the report will return a list of services with no data.

  • Master account – When you specify the master account, the resulting report lists all AWS services, because the master account is not limited by SCPs. If you specify a policy ID in the CLI or API, the policy is ignored. For each service, the report includes data for only the master account.

  • Account – When you specify another account entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for only the specified account. This means that other accounts in the organization that are affected by the SCP might not be included in the data. If the SCP is not attached to the account, the report will return a list of services with no data.

Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

+    fn generate_organizations_access_report(
+        &self,
+        input: GenerateOrganizationsAccessReportRequest,
+    ) -> RusotoFuture<
+        GenerateOrganizationsAccessReportResponse,
+        GenerateOrganizationsAccessReportError,
+    > {
+        let mut request = SignedRequest::new("POST", "iam", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "GenerateOrganizationsAccessReport");
+        params.put("Version", "2010-05-08");
+        GenerateOrganizationsAccessReportRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(GenerateOrganizationsAccessReportError::from_response(
+                        response,
+                    ))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = GenerateOrganizationsAccessReportResponse::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    start_element(&actual_tag_name, &mut stack)?;
+                    result = GenerateOrganizationsAccessReportResponseDeserializer::deserialize(
+                        "GenerateOrganizationsAccessReportResult",
+                        &mut stack,
+                    )?;
+                    skip_tree(&mut stack);
+                    end_element(&actual_tag_name, &mut stack)?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
+    ///

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

fn generate_service_last_accessed_details( &self, input: GenerateServiceLastAccessedDetailsRequest, @@ -22120,7 +22555,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22170,7 +22605,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22219,7 +22654,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22266,7 +22701,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22316,7 +22751,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22364,7 +22799,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22414,7 +22849,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22463,7 +22898,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22511,7 +22946,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22560,7 +22995,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22611,7 +23046,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22662,7 +23097,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22710,7 +23145,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22729,6 +23164,54 @@ impl Iam for IamClient { }) } + ///

Retrieves the service last accessed data report for AWS Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

To call this operation, you must be signed in to the master account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

For each service that principals in an account (root users, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

By default, the list is sorted by service namespace.

+    fn get_organizations_access_report(
+        &self,
+        input: GetOrganizationsAccessReportRequest,
+    ) -> RusotoFuture<GetOrganizationsAccessReportResponse, GetOrganizationsAccessReportError> {
+        let mut request = SignedRequest::new("POST", "iam", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "GetOrganizationsAccessReport");
+        params.put("Version", "2010-05-08");
+        GetOrganizationsAccessReportRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(GetOrganizationsAccessReportError::from_response(response))
+                }));
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = GetOrganizationsAccessReportResponse::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    start_element(&actual_tag_name, &mut stack)?;
+                    result = GetOrganizationsAccessReportResponseDeserializer::deserialize(
+                        "GetOrganizationsAccessReportResult",
+                        &mut stack,
+                    )?;
+                    skip_tree(&mut stack);
+                    end_element(&actual_tag_name, &mut stack)?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
+
     ///

Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API. This API returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

fn get_policy( &self, @@ -22761,7 +23244,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22810,7 +23293,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22858,7 +23341,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22906,7 +23389,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22957,7 +23440,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23008,7 +23491,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23058,7 +23541,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23077,7 +23560,7 @@ impl Iam for IamClient { }) } - ///

After you generate a user, group, role, or policy report using the GenerateServiceLastAccessedDetails operation, you can use the JobId parameter in GetServiceLastAccessedDetails. This operation retrieves the status of your report job and a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

+ ///

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

fn get_service_last_accessed_details( &self, input: GetServiceLastAccessedDetailsRequest, @@ -23107,7 +23590,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23162,7 +23645,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23216,7 +23699,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23264,7 +23747,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23312,7 +23795,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23363,7 +23846,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23414,7 +23897,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23462,7 +23945,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23510,7 +23993,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23558,7 +24041,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23606,7 +24089,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23657,7 +24140,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23708,7 +24191,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23759,7 +24242,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23809,7 +24292,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23857,7 +24340,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23908,7 +24391,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23956,7 +24439,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -24007,7 +24490,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -24060,7 +24543,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -24111,7 +24594,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -24162,7 +24645,7 @@ impl Iam for IamClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
@@ -24213,7 +24696,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24264,7 +24747,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24313,7 +24796,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24364,7 +24847,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24412,7 +24895,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24461,7 +24944,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24509,7 +24992,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24560,7 +25043,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24611,7 +25094,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24662,7 +25145,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24708,7 +25191,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -24968,7 +25451,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25098,7 +25581,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25146,7 +25629,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25455,7 +25938,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25503,7 +25986,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25554,7 +26037,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25735,7 +26218,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25783,7 +26266,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -25831,7 +26314,7 @@ impl Iam for IamClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
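The long run of hunks above flips the xml-rs parser from `trim_whitespace(true)` to `trim_whitespace(false)` in every IAM response handler (the same change recurs for the other XML-based services below, alongside the bump to xml-rs 0.8), so leading and trailing whitespace in XML character data now survives deserialization. A minimal sketch, outside of Rusoto and using only xml-rs, of what the flag changes; the element name here is invented:

```rust
use xml::reader::{EventReader, ParserConfig, XmlEvent};

fn main() {
    let body = "<Value>  padded  </Value>";
    let reader = EventReader::new_with_config(
        body.as_bytes(),
        // The setting this diff switches to: keep whitespace as-is.
        ParserConfig::new().trim_whitespace(false),
    );
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            // Prints "  padded  "; with trim_whitespace(true) the parser
            // would have reported "padded".
            println!("{:?}", text);
        }
    }
}
```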
diff --git a/rusoto/services/importexport/Cargo.toml b/rusoto/services/importexport/Cargo.toml
index 1fc6573eb08..13b976d15bc 100644
--- a/rusoto/services/importexport/Cargo.toml
+++ b/rusoto/services/importexport/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_importexport"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -17,18 +17,20 @@
 [dependencies]
 bytes = "0.4.12"
 futures = "0.1.16"
-serde_urlencoded = "0.5"
-xml-rs = "0.7"
+serde_urlencoded = "0.6"
+xml-rs = "0.8"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/importexport/README.md b/rusoto/services/importexport/README.md
index c96b95bdfdd..35889779a32 100644
--- a/rusoto/services/importexport/README.md
+++ b/rusoto/services/importexport/README.md
@@ -23,9 +23,16 @@ To use `rusoto_importexport` in your application, add it as a dependency in your

 ```toml
 [dependencies]
-rusoto_importexport = "0.40.0"
+rusoto_importexport = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/importexport/src/custom/mod.rs b/rusoto/services/importexport/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/importexport/src/custom/mod.rs
+++ b/rusoto/services/importexport/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/importexport/src/generated.rs b/rusoto/services/importexport/src/generated.rs
index 477b4afeaf6..2760f6926ad 100644
--- a/rusoto/services/importexport/src/generated.rs
+++ b/rusoto/services/importexport/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
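The header hunk above repeats in every generated service file: the item-level `#[allow(warnings)]`, which only covered the single `use` item that followed it, becomes the inner attribute `#![allow(warnings)]`, which covers the entire generated module; the `use std::...` lines simply move to keep the imports sorted. A toy sketch of the difference between the two attribute forms; the module and item names are invented:

```rust
mod item_level {
    // Outer attribute: silences lints for this one item only.
    #[allow(warnings)]
    use std::collections::HashMap;

    // This unused function still triggers a dead_code warning.
    fn unused() {}
}

mod module_level {
    // Inner attribute: silences lints for everything in the module,
    // which is what machine-generated code wants.
    #![allow(warnings)]

    use std::collections::HashMap;

    fn unused() {}
}

fn main() {}
```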
@@ -1580,10 +1579,7 @@ impl ImportExportClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> ImportExportClient {
-        ImportExportClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -1597,10 +1593,14 @@ impl ImportExportClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        ImportExportClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> ImportExportClient {
+        ImportExportClient { client, region }
     }
 }
@@ -1639,7 +1639,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -1690,7 +1690,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -1744,7 +1744,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -1797,7 +1797,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -1844,7 +1844,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
@@ -1894,7 +1894,7 @@ impl ImportExport for ImportExportClient {
             } else {
                 let reader = EventReader::new_with_config(
                     response.body.as_ref(),
-                    ParserConfig::new().trim_whitespace(true),
+                    ParserConfig::new().trim_whitespace(false),
                 );
                 let mut stack = XmlResponse::new(reader.into_iter().peekable());
                 let _start_document = stack.next();
diff --git a/rusoto/services/inspector/Cargo.toml b/rusoto/services/inspector/Cargo.toml
index b1441b86261..53e72443908 100644
--- a/rusoto/services/inspector/Cargo.toml
+++ b/rusoto/services/inspector/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_inspector"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
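Each service crate gains the empty `serialize_structs` feature declared above, wired to the `cfg_attr` change that the hunks below repeat for every response struct: `Serialize` is now derived when the feature is enabled, not only under `cfg(test)`. A hedged sketch of what this enables for a consumer; the struct and field here are invented stand-ins for a generated response type:

```rust
use serde::{Deserialize, Serialize};

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ExampleResponse {
    pub arn: Option<String>,
}

// Compiled only when built with `--features serialize_structs`; without
// the feature the struct stays deserialize-only and costs nothing extra.
#[cfg(feature = "serialize_structs")]
pub fn to_json(resp: &ExampleResponse) -> serde_json::Result<String> {
    serde_json::to_string(resp)
}
```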
diff --git a/rusoto/services/inspector/README.md b/rusoto/services/inspector/README.md
index 8299777444e..f49fd9c9d38 100644
--- a/rusoto/services/inspector/README.md
+++ b/rusoto/services/inspector/README.md
@@ -23,9 +23,16 @@ To use `rusoto_inspector` in your application, add it as a dependency in your `C

 ```toml
 [dependencies]
-rusoto_inspector = "0.40.0"
+rusoto_inspector = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/inspector/src/custom/mod.rs b/rusoto/services/inspector/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/inspector/src/custom/mod.rs
+++ b/rusoto/services/inspector/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/inspector/src/generated.rs b/rusoto/services/inspector/src/generated.rs
index aa53313f6c3..e9810285f1c 100644
--- a/rusoto/services/inspector/src/generated.rs
+++ b/rusoto/services/inspector/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -35,7 +34,7 @@ pub struct AddAttributesToFindingsRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddAttributesToFindingsResponse {
     ///

Attribute details that cannot be described. An error code is provided for each failed item.

#[serde(rename = "failedItems")] @@ -64,7 +63,7 @@ pub struct AgentFilter { ///

Used as a response element in the PreviewAgents action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AgentPreview { ///

The health status of the Amazon Inspector Agent.

#[serde(rename = "agentHealth")] @@ -101,7 +100,7 @@ pub struct AgentPreview { ///

A snapshot of an Amazon Inspector assessment run that contains the findings of the assessment run.

Used as the response element in the DescribeAssessmentRuns action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentRun { ///

The ARN of the assessment run.

#[serde(rename = "arn")] @@ -154,7 +153,7 @@ pub struct AssessmentRun { ///

Contains information about an Amazon Inspector agent. This data type is used as a response element in the ListAssessmentRunAgents action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentRunAgent { ///

The current health state of the agent.

#[serde(rename = "agentHealth")] @@ -216,7 +215,7 @@ pub struct AssessmentRunFilter { ///

Used as one of the elements of the AssessmentRun data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentRunNotification { ///

The date of the notification.

#[serde(rename = "date")] @@ -243,7 +242,7 @@ pub struct AssessmentRunNotification { ///

Used as one of the elements of the AssessmentRun data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentRunStateChange { ///

The assessment run state.

#[serde(rename = "state")] @@ -255,7 +254,7 @@ pub struct AssessmentRunStateChange { ///

Contains information about an Amazon Inspector application. This data type is used as the response element in the DescribeAssessmentTargets action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentTarget { ///

The ARN that specifies the Amazon Inspector assessment target.

#[serde(rename = "arn")] @@ -286,7 +285,7 @@ pub struct AssessmentTargetFilter { ///

Contains information about an Amazon Inspector assessment template. This data type is used as the response element in the DescribeAssessmentTemplates action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssessmentTemplate { ///

The ARN of the assessment template.

#[serde(rename = "arn")] @@ -337,7 +336,7 @@ pub struct AssessmentTemplateFilter { ///

A collection of attributes of the host from which the finding is generated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssetAttributes { ///

The ID of the agent that is installed on the EC2 instance where the finding is generated.

#[serde(rename = "agentId")] @@ -396,7 +395,7 @@ pub struct CreateAssessmentTargetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAssessmentTargetResponse { ///

The ARN that specifies the assessment target that is created.

#[serde(rename = "assessmentTargetArn")] @@ -424,7 +423,7 @@ pub struct CreateAssessmentTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAssessmentTemplateResponse { ///

The ARN that specifies the assessment template that is created.

#[serde(rename = "assessmentTemplateArn")] @@ -439,7 +438,7 @@ pub struct CreateExclusionsPreviewRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateExclusionsPreviewResponse { ///

Specifies the unique identifier of the requested exclusions preview. You can use the unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview API.

#[serde(rename = "previewToken")] @@ -454,7 +453,7 @@ pub struct CreateResourceGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResourceGroupResponse { ///

The ARN that specifies the resource group that is created.

#[serde(rename = "resourceGroupArn")] @@ -490,7 +489,7 @@ pub struct DescribeAssessmentRunsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssessmentRunsResponse { ///

Information about the assessment run.

#[serde(rename = "assessmentRuns")] @@ -508,7 +507,7 @@ pub struct DescribeAssessmentTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssessmentTargetsResponse { ///

Information about the assessment targets.

#[serde(rename = "assessmentTargets")] @@ -525,7 +524,7 @@ pub struct DescribeAssessmentTemplatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssessmentTemplatesResponse { ///

Information about the assessment templates.

#[serde(rename = "assessmentTemplates")] @@ -536,7 +535,7 @@ pub struct DescribeAssessmentTemplatesResponse { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCrossAccountAccessRoleResponse { ///

The date when the cross-account access role was registered.

#[serde(rename = "registeredAt")] @@ -561,7 +560,7 @@ pub struct DescribeExclusionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeExclusionsResponse { ///

Information about the exclusions.

#[serde(rename = "exclusions")] @@ -583,7 +582,7 @@ pub struct DescribeFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFindingsResponse { ///

Finding details that cannot be described. An error code is provided for each failed item.

#[serde(rename = "failedItems")] @@ -601,7 +600,7 @@ pub struct DescribeResourceGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeResourceGroupsResponse { ///

Resource group details that cannot be described. An error code is provided for each failed item.

#[serde(rename = "failedItems")] @@ -623,7 +622,7 @@ pub struct DescribeRulesPackagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRulesPackagesResponse { ///

Rules package details that cannot be described. An error code is provided for each failed item.

#[serde(rename = "failedItems")] @@ -648,7 +647,7 @@ pub struct DurationRange { ///

This data type is used in the Subscription data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventSubscription { ///

The event for which Amazon Simple Notification Service (SNS) notifications are sent.

#[serde(rename = "event")] @@ -660,7 +659,7 @@ pub struct EventSubscription { ///

Contains information about what was excluded from an assessment run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Exclusion { ///

The ARN that specifies the exclusion.

#[serde(rename = "arn")] @@ -685,7 +684,7 @@ pub struct Exclusion { ///

Contains information about what is excluded from an assessment run given the current state of the assessment template.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExclusionPreview { ///

The system-defined attributes for the exclusion preview.

#[serde(rename = "attributes")] @@ -707,7 +706,7 @@ pub struct ExclusionPreview { ///

Includes details about the failed items.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedItemDetails { ///

The status code of a failed item.

#[serde(rename = "failureCode")] @@ -719,7 +718,7 @@ pub struct FailedItemDetails { ///

Contains information about an Amazon Inspector finding. This data type is used as the response element in the DescribeFindings action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Finding { ///

The ARN that specifies the finding.

#[serde(rename = "arn")] @@ -841,7 +840,7 @@ pub struct GetAssessmentReportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAssessmentReportResponse { ///

Specifies the status of the request to generate an assessment report.

#[serde(rename = "status")] @@ -875,7 +874,7 @@ pub struct GetExclusionsPreviewRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetExclusionsPreviewResponse { ///

Information about the exclusions included in the preview.

#[serde(rename = "exclusionPreviews")] @@ -898,7 +897,7 @@ pub struct GetTelemetryMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTelemetryMetadataResponse { ///

Telemetry details.

#[serde(rename = "telemetryMetadata")] @@ -907,7 +906,7 @@ pub struct GetTelemetryMetadataResponse { ///

This data type is used in the Finding data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InspectorServiceAttributes { ///

The ARN of the assessment run during which the finding is generated.

#[serde(rename = "assessmentRunArn")] @@ -942,7 +941,7 @@ pub struct ListAssessmentRunAgentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssessmentRunAgentsResponse { ///

A list of ARNs that specifies the agents returned by the action.

#[serde(rename = "assessmentRunAgents")] @@ -974,7 +973,7 @@ pub struct ListAssessmentRunsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssessmentRunsResponse { ///

A list of ARNs that specifies the assessment runs that are returned by the action.

#[serde(rename = "assessmentRunArns")] @@ -1002,7 +1001,7 @@ pub struct ListAssessmentTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssessmentTargetsResponse { ///

A list of ARNs that specifies the assessment targets that are returned by the action.

#[serde(rename = "assessmentTargetArns")] @@ -1034,7 +1033,7 @@ pub struct ListAssessmentTemplatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssessmentTemplatesResponse { ///

A list of ARNs that specifies the assessment templates returned by the action.

#[serde(rename = "assessmentTemplateArns")] @@ -1062,7 +1061,7 @@ pub struct ListEventSubscriptionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEventSubscriptionsResponse { ///

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

#[serde(rename = "nextToken")] @@ -1089,7 +1088,7 @@ pub struct ListExclusionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListExclusionsResponse { ///

A list of exclusions' ARNs returned by the action.

#[serde(rename = "exclusionArns")] @@ -1121,7 +1120,7 @@ pub struct ListFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFindingsResponse { ///

A list of ARNs that specifies the findings returned by the action.

#[serde(rename = "findingArns")] @@ -1145,7 +1144,7 @@ pub struct ListRulesPackagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRulesPackagesResponse { ///

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

#[serde(rename = "nextToken")] @@ -1164,7 +1163,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

A collection of key and value pairs.

#[serde(rename = "tags")] @@ -1173,7 +1172,7 @@ pub struct ListTagsForResourceResponse { ///

Contains information about the network interfaces interacting with an EC2 instance. This data type is used as one of the elements of the AssetAttributes data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NetworkInterface { ///

The IP addresses associated with the network interface.

#[serde(rename = "ipv6Addresses")] @@ -1233,7 +1232,7 @@ pub struct PreviewAgentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PreviewAgentsResponse { ///

The resulting list of agents.

#[serde(rename = "agentPreviews")] @@ -1246,7 +1245,7 @@ pub struct PreviewAgentsResponse { ///

Contains information about a private IP address associated with a network interface. This data type is used as a response element in the DescribeFindings action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PrivateIp { ///

The DNS name of the private IP address.

#[serde(rename = "privateDnsName")] @@ -1276,7 +1275,7 @@ pub struct RemoveAttributesFromFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveAttributesFromFindingsResponse { ///

Attributes details that cannot be described. An error code is provided for each failed item.

#[serde(rename = "failedItems")] @@ -1285,7 +1284,7 @@ pub struct RemoveAttributesFromFindingsResponse { ///

Contains information about a resource group. The resource group defines a set of tags that, when queried, identify the AWS resources that make up the assessment target. This data type is used as the response element in the DescribeResourceGroups action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceGroup { ///

The ARN of the resource group.

#[serde(rename = "arn")] @@ -1312,7 +1311,7 @@ pub struct ResourceGroupTag { ///

Contains information about an Amazon Inspector rules package. This data type is used as the response element in the DescribeRulesPackages action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RulesPackage { ///

The ARN of the rules package.

#[serde(rename = "arn")] @@ -1334,7 +1333,7 @@ pub struct RulesPackage { ///

This data type contains key-value pairs that identify various Amazon resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Scope { ///

The type of the scope.

#[serde(rename = "key")] @@ -1348,7 +1347,7 @@ pub struct Scope { ///

Contains information about a security group associated with a network interface. This data type is used as one of the elements of the NetworkInterface data type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityGroup { ///

The ID of the security group.

#[serde(rename = "groupId")] @@ -1383,7 +1382,7 @@ pub struct StartAssessmentRunRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartAssessmentRunResponse { ///

The ARN of the assessment run that has been started.

#[serde(rename = "assessmentRunArn")] @@ -1416,7 +1415,7 @@ pub struct SubscribeToEventRequest { ///

This data type is used as a response element in the ListEventSubscriptions action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Subscription { ///

The list of existing event subscriptions.

#[serde(rename = "eventSubscriptions")] @@ -1443,7 +1442,7 @@ pub struct Tag { ///

The metadata about the Amazon Inspector application data metrics collected by the agent. This data type is used as the response element in the GetTelemetryMetadata action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TelemetryMetadata { ///

The count of messages that the agent sends to the Amazon Inspector service.

    #[serde(rename = "count")]
@@ -3817,10 +3816,7 @@ impl InspectorClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> InspectorClient {
-        InspectorClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -3834,10 +3830,14 @@ impl InspectorClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        InspectorClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> InspectorClient {
+        InspectorClient { client, region }
     }
 }
diff --git a/rusoto/services/iot-data/Cargo.toml b/rusoto/services/iot-data/Cargo.toml
index c5d837acc81..85fdfb825e2 100644
--- a/rusoto/services/iot-data/Cargo.toml
+++ b/rusoto/services/iot-data/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_iot_data"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -21,14 +21,16 @@ serde = "1.0.2"
 serde_derive = "1.0.2"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/iot-data/README.md b/rusoto/services/iot-data/README.md
index 4956700d10f..2c9c175f06a 100644
--- a/rusoto/services/iot-data/README.md
+++ b/rusoto/services/iot-data/README.md
@@ -23,9 +23,16 @@ To use `rusoto_iot_data` in your application, add it as a dependency in your `Ca

 ```toml
 [dependencies]
-rusoto_iot_data = "0.40.0"
+rusoto_iot_data = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/iot-data/src/custom/mod.rs b/rusoto/services/iot-data/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/iot-data/src/custom/mod.rs
+++ b/rusoto/services/iot-data/src/custom/mod.rs
@@ -0,0 +1 @@
+
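The constructor rewrite above is repeated for every client in this changeset: `new` and `new_with` now both funnel through a new public `new_with_client`, so a caller can inject an existing `rusoto_core::Client`, for example to reuse one HTTP stack across several service clients. A sketch of the new entry point, assuming the 0.41 crates as changed here:

```rust
use rusoto_core::{Client, Region};
use rusoto_inspector::InspectorClient;

fn main() {
    // One shared rusoto_core::Client...
    let shared = Client::shared();
    // ...handed to a service client instead of letting `new` build its own.
    let inspector = InspectorClient::new_with_client(shared, Region::UsEast1);
    let _ = inspector;
}
```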
diff --git a/rusoto/services/iot-data/src/generated.rs b/rusoto/services/iot-data/src/generated.rs
index ad5a4ef370a..aa5e187e472 100644
--- a/rusoto/services/iot-data/src/generated.rs
+++ b/rusoto/services/iot-data/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -427,10 +426,7 @@ impl IotDataClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> IotDataClient {
-        IotDataClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -444,10 +440,14 @@ impl IotDataClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        IotDataClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> IotDataClient {
+        IotDataClient { client, region }
     }
 }
diff --git a/rusoto/services/iot-jobs-data/Cargo.toml b/rusoto/services/iot-jobs-data/Cargo.toml
index eb95f9bb57c..36e4034b8b7 100644
--- a/rusoto/services/iot-jobs-data/Cargo.toml
+++ b/rusoto/services/iot-jobs-data/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_iot_jobs_data"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/iot-jobs-data/README.md b/rusoto/services/iot-jobs-data/README.md
index e7ac60f989d..3b51e6a9023 100644
--- a/rusoto/services/iot-jobs-data/README.md
+++ b/rusoto/services/iot-jobs-data/README.md
@@ -23,9 +23,16 @@ To use `rusoto_iot_jobs_data` in your application, add it as a dependency in you

 ```toml
 [dependencies]
-rusoto_iot_jobs_data = "0.40.0"
+rusoto_iot_jobs_data = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/iot-jobs-data/src/custom/mod.rs b/rusoto/services/iot-jobs-data/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/iot-jobs-data/src/custom/mod.rs
+++ b/rusoto/services/iot-jobs-data/src/custom/mod.rs
@@ -0,0 +1 @@
+
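The `rusoto_mock` dev-dependency now sets `default-features = false`, so test builds no longer pull in the mock crate's default TLS backend. For context, `new_with` is the hook the mock crate uses; a test-style sketch, assuming the rusoto_mock 0.41 API, with the request details omitted:

```rust
use rusoto_core::Region;
use rusoto_iot_jobs_data::IotJobsDataClient;
use rusoto_mock::{MockCredentialsProvider, MockRequestDispatcher};

fn mock_client() -> IotJobsDataClient {
    // A canned dispatcher stands in for the real HTTP stack; no TLS
    // backend is needed, which is why default features can be off.
    let dispatcher = MockRequestDispatcher::default().with_body("{}");
    IotJobsDataClient::new_with(dispatcher, MockCredentialsProvider, Region::UsEast1)
}

fn main() {
    let _client = mock_client();
}
```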
diff --git a/rusoto/services/iot-jobs-data/src/generated.rs b/rusoto/services/iot-jobs-data/src/generated.rs
index 0db17b32359..4c9d5cefe78 100644
--- a/rusoto/services/iot-jobs-data/src/generated.rs
+++ b/rusoto/services/iot-jobs-data/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -44,7 +43,7 @@ pub struct DescribeJobExecutionRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeJobExecutionResponse {
     ///

Contains data about a job execution.

#[serde(rename = "execution")] @@ -60,7 +59,7 @@ pub struct GetPendingJobExecutionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPendingJobExecutionsResponse { ///

A list of JobExecutionSummary objects with status IN_PROGRESS.

#[serde(rename = "inProgressJobs")] @@ -74,7 +73,7 @@ pub struct GetPendingJobExecutionsResponse { ///

Contains data about a job execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecution { ///

The estimated number of seconds that remain before the job execution status will be changed to TIMED_OUT.

#[serde(rename = "approximateSecondsBeforeTimedOut")] @@ -124,7 +123,7 @@ pub struct JobExecution { ///

Contains data about the state of a job execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionState { ///

The status of the job execution. Can be one of: "QUEUED", "IN_PROGRESS", "FAILED", "SUCCESS", "CANCELED", "REJECTED", or "REMOVED".

#[serde(rename = "status")] @@ -142,7 +141,7 @@ pub struct JobExecutionState { ///

Contains a subset of information about a job execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionSummary { ///

A number that identifies a particular job execution on a particular device.

#[serde(rename = "executionNumber")] @@ -186,7 +185,7 @@ pub struct StartNextPendingJobExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartNextPendingJobExecutionResponse { ///

A JobExecution object.

#[serde(rename = "execution")] @@ -232,7 +231,7 @@ pub struct UpdateJobExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateJobExecutionResponse { ///

A JobExecutionState object.

    #[serde(rename = "executionState")]
@@ -554,10 +553,7 @@ impl IotJobsDataClient {
     ///
     /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> IotJobsDataClient {
-        IotJobsDataClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -571,10 +567,14 @@ impl IotJobsDataClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        IotJobsDataClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> IotJobsDataClient {
+        IotJobsDataClient { client, region }
     }
 }
diff --git a/rusoto/services/iot/Cargo.toml b/rusoto/services/iot/Cargo.toml
index 185a9f961a7..ff773ff2621 100644
--- a/rusoto/services/iot/Cargo.toml
+++ b/rusoto/services/iot/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_iot"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/iot/README.md b/rusoto/services/iot/README.md
index 14993ea4744..8f8033ee8d7 100644
--- a/rusoto/services/iot/README.md
+++ b/rusoto/services/iot/README.md
@@ -23,9 +23,16 @@ To use `rusoto_iot` in your application, add it as a dependency in your `Cargo.t

 ```toml
 [dependencies]
-rusoto_iot = "0.40.0"
+rusoto_iot = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/iot/src/custom/mod.rs b/rusoto/services/iot/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/iot/src/custom/mod.rs
+++ b/rusoto/services/iot/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/iot/src/generated.rs b/rusoto/services/iot/src/generated.rs
index 236095f0889..a0f105ffb6d 100644
--- a/rusoto/services/iot/src/generated.rs
+++ b/rusoto/services/iot/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -133,7 +132,7 @@ pub struct Action {
     ///

Information about an active Device Defender security profile behavior violation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActiveViolation { ///

The behavior which is being violated.

    #[serde(rename = "behavior")]
@@ -186,7 +185,7 @@ pub struct AddThingToBillingGroupRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddThingToBillingGroupResponse {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -214,9 +213,21 @@ pub struct AddThingToThingGroupRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddThingToThingGroupResponse {}

+/// <p>Parameters used when defining a mitigation action that moves a set of things to a thing group.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AddThingsToThingGroupParams {
+    /// <p>Specifies if this mitigation action can move the things that triggered the mitigation action even if they are part of one or more dynamic thing groups.</p>
+    #[serde(rename = "overrideDynamicGroups")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub override_dynamic_groups: Option<bool>,
+    /// <p>The list of groups to which you want to add the things that triggered the mitigation action. You can add a thing to a maximum of 10 groups, but you cannot add a thing to more than one group in the same hierarchy.</p>
+    #[serde(rename = "thingGroupNames")]
+    pub thing_group_names: Vec<String>,
+}
+
 ///

A structure containing the alert target ARN and the role ARN.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AlertTarget { @@ -230,7 +241,7 @@ pub struct AlertTarget { ///

Contains information that allowed the authorization.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Allowed { ///

A list of policies that allowed the authentication.

#[serde(rename = "policies")] @@ -253,7 +264,7 @@ pub struct AssociateTargetsWithJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateTargetsWithJobResponse { ///

A short text description of the job.

#[serde(rename = "description")] @@ -301,7 +312,7 @@ pub struct AttachSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachSecurityProfileResponse {} ///

The input for the AttachThingPrincipal operation.

@@ -317,7 +328,7 @@ pub struct AttachThingPrincipalRequest { ///

The output from the AttachThingPrincipal operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachThingPrincipalResponse {} ///

The attribute payload.

@@ -327,7 +338,7 @@ pub struct AttributePayload {
     #[serde(rename = "attributes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub attributes: Option<::std::collections::HashMap<String, String>>,
-    ///

Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.

To remove an attribute, call UpdateThing with an empty attribute value.

The merge attribute is only valid when calling UpdateThing.

+ ///

Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.

To remove an attribute, call UpdateThing with an empty attribute value.

The merge attribute is only valid when calling UpdateThing or UpdateThingGroup.

#[serde(rename = "merge")] #[serde(skip_serializing_if = "Option::is_none")] pub merge: Option, @@ -344,25 +355,25 @@ pub struct AuditCheckConfiguration { ///

Information about the audit check.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuditCheckDetails { - ///

True if the check completed and found all resources compliant.

+ ///

True if the check is complete and found all resources compliant.

#[serde(rename = "checkCompliant")] #[serde(skip_serializing_if = "Option::is_none")] pub check_compliant: Option, - ///

The completion status of this check, one of "IN_PROGRESS", "WAITING_FOR_DATA_COLLECTION", "CANCELED", "COMPLETED_COMPLIANT", "COMPLETED_NON_COMPLIANT", or "FAILED".

+ ///

The completion status of this check. One of "IN_PROGRESS", "WAITING_FOR_DATA_COLLECTION", "CANCELED", "COMPLETED_COMPLIANT", "COMPLETED_NON_COMPLIANT", or "FAILED".

#[serde(rename = "checkRunStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub check_run_status: Option, - ///

The code of any error encountered when performing this check during this audit. One of "INSUFFICIENT_PERMISSIONS", or "AUDIT_CHECK_DISABLED".

+ ///

The code of any error encountered when this check is performed during this audit. One of "INSUFFICIENT_PERMISSIONS" or "AUDIT_CHECK_DISABLED".

#[serde(rename = "errorCode")] #[serde(skip_serializing_if = "Option::is_none")] pub error_code: Option, - ///

The message associated with any error encountered when performing this check during this audit.

+ ///

The message associated with any error encountered when this check is performed during this audit.

#[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option, - ///

The number of resources that the check found non-compliant.

+ ///

The number of resources that were found noncompliant during the check.

#[serde(rename = "nonCompliantResourcesCount")] #[serde(skip_serializing_if = "Option::is_none")] pub non_compliant_resources_count: Option, @@ -374,25 +385,29 @@ pub struct AuditCheckDetails { ///

The findings (results) of the audit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuditFinding { ///

The audit check that generated this result.

#[serde(rename = "checkName")] #[serde(skip_serializing_if = "Option::is_none")] pub check_name: Option, + ///

A unique identifier for this set of audit findings. This identifier is used to apply mitigation tasks to one or more sets of findings.

+ #[serde(rename = "findingId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub finding_id: Option, ///

The time the result (finding) was discovered.

#[serde(rename = "findingTime")] #[serde(skip_serializing_if = "Option::is_none")] pub finding_time: Option, - ///

The resource that was found to be non-compliant with the audit check.

+ ///

The resource that was found to be noncompliant with the audit check.

#[serde(rename = "nonCompliantResource")] #[serde(skip_serializing_if = "Option::is_none")] pub non_compliant_resource: Option, - ///

The reason the resource was non-compliant.

+ ///

The reason the resource was noncompliant.

#[serde(rename = "reasonForNonCompliance")] #[serde(skip_serializing_if = "Option::is_none")] pub reason_for_non_compliance: Option, - ///

A code which indicates the reason that the resource was non-compliant.

+ ///

A code that indicates the reason that the resource was noncompliant.

#[serde(rename = "reasonForNonComplianceCode")] #[serde(skip_serializing_if = "Option::is_none")] pub reason_for_non_compliance_code: Option, @@ -404,7 +419,7 @@ pub struct AuditFinding { #[serde(rename = "severity")] #[serde(skip_serializing_if = "Option::is_none")] pub severity: Option, - ///

The ID of the audit that generated this result (finding)

+ ///

The ID of the audit that generated this result (finding).

#[serde(rename = "taskId")] #[serde(skip_serializing_if = "Option::is_none")] pub task_id: Option, @@ -414,6 +429,83 @@ pub struct AuditFinding { pub task_start_time: Option, } +///

Returned by ListAuditMitigationActionsTask, this object contains information that describes a mitigation action that has been started.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct AuditMitigationActionExecutionMetadata { + ///

The unique identifier for the mitigation action being applied by the task.

+ #[serde(rename = "actionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_id: Option, + ///

The friendly name of the mitigation action being applied by the task.

+ #[serde(rename = "actionName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_name: Option, + ///

The date and time when the task was completed or canceled. Blank if the task is still running.

+ #[serde(rename = "endTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub end_time: Option, + ///

If an error occurred, the code that indicates which type of error occurred.

+ #[serde(rename = "errorCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub error_code: Option, + ///

The unique identifier for the findings to which the task and associated mitigation action are applied.

+ #[serde(rename = "findingId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub finding_id: Option, + ///

If an error occurred, a message that describes the error.

+ #[serde(rename = "message")] + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + ///

The date and time when the task was started.

+ #[serde(rename = "startTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_time: Option, + ///

The current status of the task being executed.

+ #[serde(rename = "status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + ///

The unique identifier for the task that applies the mitigation action.

+ #[serde(rename = "taskId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_id: Option, +} + +///

Information about an audit mitigation actions task that is returned by ListAuditMitigationActionsTasks.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct AuditMitigationActionsTaskMetadata { + ///

The time at which the audit mitigation actions task was started.

+ #[serde(rename = "startTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_time: Option, + ///

The unique identifier for the task.

+ #[serde(rename = "taskId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_id: Option, + ///

The current state of the audit mitigation actions task.

+ #[serde(rename = "taskStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_status: Option, +} + +///

Used in MitigationActionParams, this information identifies the target findings to which the mitigation actions are applied. Only one entry appears.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AuditMitigationActionsTaskTarget { + ///

Specifies a filter in the form of an audit check and set of reason codes that identify the findings from the audit to which the audit mitigation actions task apply.

+ #[serde(rename = "auditCheckToReasonCodeFilter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub audit_check_to_reason_code_filter: Option<::std::collections::HashMap>>, + ///

If the task will apply a mitigation action to findings from a specific audit, this value uniquely identifies the audit.

+ #[serde(rename = "auditTaskId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub audit_task_id: Option, + ///

If the task will apply a mitigation action to one or more listed findings, this value uniquely identifies those findings.

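Since the doc comment above says only one entry appears, a caller would set exactly one of the three optional fields. A minimal, illustrative sketch using the `Default` derive the struct already has:

```rust
use rusoto_iot::AuditMitigationActionsTaskTarget;

// Target a specific list of findings; the audit-task and check/reason-code
// filters stay unset because the service expects a single targeting mode.
fn target_findings(finding_ids: Vec<String>) -> AuditMitigationActionsTaskTarget {
    AuditMitigationActionsTaskTarget {
        finding_ids: Some(finding_ids),
        ..Default::default()
    }
}
```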
+ #[serde(rename = "findingIds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub finding_ids: Option>, +} + ///

Information about the targets to which audit notifications are sent.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AuditNotificationTarget { @@ -433,17 +525,17 @@ pub struct AuditNotificationTarget { ///

The audits that were performed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuditTaskMetadata { ///

The ID of this audit.

#[serde(rename = "taskId")] #[serde(skip_serializing_if = "Option::is_none")] pub task_id: Option, - ///

The status of this audit: one of "IN_PROGRESS", "COMPLETED", "FAILED" or "CANCELED".

+ ///

The status of this audit. One of "IN_PROGRESS", "COMPLETED", "FAILED", or "CANCELED".

#[serde(rename = "taskStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub task_status: Option, - ///

The type of this audit: one of "ON_DEMAND_AUDIT_TASK" or "SCHEDULED_AUDIT_TASK".

+ ///

The type of this audit. One of "ON_DEMAND_AUDIT_TASK" or "SCHEDULED_AUDIT_TASK".

#[serde(rename = "taskType")] #[serde(skip_serializing_if = "Option::is_none")] pub task_type: Option, @@ -464,7 +556,7 @@ pub struct AuthInfo { ///

The authorizer result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthResult { ///

The policies and statements that allowed the specified action.

#[serde(rename = "allowed")] @@ -490,7 +582,7 @@ pub struct AuthResult { ///

The authorizer description.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthorizerDescription { ///

The authorizer ARN.

#[serde(rename = "authorizerArn")] @@ -528,7 +620,7 @@ pub struct AuthorizerDescription { ///

The authorizer summary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthorizerSummary { ///

The authorizer ARN.

#[serde(rename = "authorizerArn")] @@ -596,7 +688,7 @@ pub struct BehaviorCriteria { ///

Additional information about the billing group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BillingGroupMetadata { ///

The date the billing group was created.

#[serde(rename = "creationDate")] @@ -615,7 +707,7 @@ pub struct BillingGroupProperties { ///

A CA certificate.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CACertificate { ///

The ARN of the CA certificate.

#[serde(rename = "certificateArn")] @@ -637,7 +729,7 @@ pub struct CACertificate { ///

Describes a CA certificate.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CACertificateDescription { ///

Whether the CA certificate configured for auto registration of device certificates. Valid values are "ENABLE" and "DISABLE"

#[serde(rename = "autoRegistrationStatus")] @@ -685,6 +777,17 @@ pub struct CACertificateDescription { pub validity: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CancelAuditMitigationActionsTaskRequest { + ///

The unique identifier for the task that you want to cancel.

+ #[serde(rename = "taskId")] + pub task_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CancelAuditMitigationActionsTaskResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CancelAuditTaskRequest { ///

The ID of the audit you want to cancel. You can only cancel an audit that is "IN_PROGRESS".

@@ -693,7 +796,7 @@ pub struct CancelAuditTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelAuditTaskResponse {} ///

The input for the CancelCertificateTransfer operation.

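For the new CancelAuditMitigationActionsTask pair above, usage would look like the sketch below, assuming the `Iot` trait gains a `cancel_audit_mitigation_actions_task` method following rusoto's usual naming and the blocking `.sync()` call style of this generation:

```rust
use rusoto_core::Region;
use rusoto_iot::{CancelAuditMitigationActionsTaskRequest, Iot, IotClient};

fn cancel_task(task_id: &str) {
    let client = IotClient::new(Region::UsEast1);
    let request = CancelAuditMitigationActionsTaskRequest {
        task_id: task_id.to_owned(),
    };
    // The response struct carries no fields, so success is all we observe.
    match client.cancel_audit_mitigation_actions_task(request).sync() {
        Ok(_) => println!("canceled task {}", task_id),
        Err(e) => eprintln!("cancel failed: {}", e),
    }
}
```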
@@ -746,7 +849,7 @@ pub struct CancelJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelJobResponse { /// A short text description of the job.
#[serde(rename = "description")] @@ -764,7 +867,7 @@ pub struct CancelJobResponse { /// Information about a certificate.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Certificate { /// The ARN of the certificate.
#[serde(rename = "certificateArn")] @@ -786,7 +889,7 @@ pub struct Certificate { /// Describes a certificate.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CertificateDescription { /// The certificate ID of the CA certificate used to sign this certificate.
#[serde(rename = "caCertificateId")] @@ -844,7 +947,7 @@ pub struct CertificateDescription { /// When the certificate is valid.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CertificateValidity { /// The certificate is not valid after this date.
#[serde(rename = "notAfter")] @@ -860,7 +963,7 @@ pub struct CertificateValidity { pub struct ClearDefaultAuthorizerRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClearDefaultAuthorizerResponse {} /// Describes an action that updates a CloudWatch alarm.
@@ -978,7 +1081,7 @@ pub struct CreateAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAuthorizerResponse { /// The authorizer ARN.
#[serde(rename = "authorizerArn")] @@ -1006,7 +1109,7 @@ pub struct CreateBillingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBillingGroupResponse { /// The ARN of the billing group.
#[serde(rename = "billingGroupArn")] @@ -1036,7 +1139,7 @@ pub struct CreateCertificateFromCsrRequest { /// The output from the CreateCertificateFromCsr operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCertificateFromCsrResponse { /// The Amazon Resource Name (ARN) of the certificate. You can use the ARN as a principal for policy operations.
#[serde(rename = "certificateArn")] @@ -1079,7 +1182,7 @@ pub struct CreateDynamicThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDynamicThingGroupResponse { /// The dynamic thing group index name.
#[serde(rename = "indexName")] @@ -1154,7 +1257,7 @@ pub struct CreateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobResponse { /// The job description.
#[serde(rename = "description")] @@ -1181,7 +1284,7 @@ pub struct CreateKeysAndCertificateRequest { /// The output of the CreateKeysAndCertificate operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateKeysAndCertificateResponse { /// The ARN of the certificate.
#[serde(rename = "certificateArn")] @@ -1201,6 +1304,36 @@ pub struct CreateKeysAndCertificateResponse { pub key_pair: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateMitigationActionRequest { + ///

A friendly name for the action. Choose a friendly name that accurately describes the action (for example, EnableLoggingAction).

+ #[serde(rename = "actionName")] + pub action_name: String, + ///

Defines the type of action and the parameters for that action.

+ #[serde(rename = "actionParams")] + pub action_params: MitigationActionParams, + ///

The ARN of the IAM role that is used to apply the mitigation action.

+ #[serde(rename = "roleArn")] + pub role_arn: String, + ///

Metadata that can be used to manage the mitigation action.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateMitigationActionResponse { + ///

The ARN for the new mitigation action.

+ #[serde(rename = "actionArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_arn: Option, + ///

A unique identifier for the new mitigation action.

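A sketch of building the new request type, again assuming a generated `create_mitigation_action` method; the `enable_io_t_logging_params` field name on `MitigationActionParams` is inferred from rusoto's snake_case convention and should be checked against the generated code:

```rust
use rusoto_core::Region;
use rusoto_iot::{
    CreateMitigationActionRequest, EnableIoTLoggingParams, Iot, IotClient,
    MitigationActionParams,
};

fn create_logging_action(role_arn: &str) {
    let client = IotClient::new(Region::UsEast1);
    let request = CreateMitigationActionRequest {
        action_name: "EnableLoggingAction".to_owned(),
        role_arn: role_arn.to_owned(),
        action_params: MitigationActionParams {
            // Field name assumed from rusoto's casing rules.
            enable_io_t_logging_params: Some(EnableIoTLoggingParams {
                log_level: "ERROR".to_owned(),
                role_arn_for_logging: role_arn.to_owned(),
            }),
            ..Default::default()
        },
        tags: None,
    };
    if let Ok(response) = client.create_mitigation_action(request).sync() {
        println!("created mitigation action {:?}", response.action_id);
    }
}
```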
+ #[serde(rename = "actionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateOTAUpdateRequest { ///

A list of additional OTA update parameters which are name-value pairs.

@@ -1238,7 +1371,7 @@ pub struct CreateOTAUpdateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateOTAUpdateResponse { ///

The AWS IoT job ARN associated with the OTA update.

#[serde(rename = "awsIotJobArn")] @@ -1275,7 +1408,7 @@ pub struct CreatePolicyRequest { ///

The output from the CreatePolicy operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePolicyResponse { ///

The policy ARN.

#[serde(rename = "policyArn")] @@ -1312,7 +1445,7 @@ pub struct CreatePolicyVersionRequest { ///

The output of the CreatePolicyVersion operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePolicyVersionResponse { ///

Specifies whether the policy version is the default.

#[serde(rename = "isDefaultVersion")] @@ -1347,7 +1480,7 @@ pub struct CreateRoleAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRoleAliasResponse { ///

The role alias.

#[serde(rename = "roleAlias")] @@ -1365,27 +1498,27 @@ pub struct CreateScheduledAuditRequest { #[serde(rename = "dayOfMonth")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_month: Option, - ///

The day of the week on which the scheduled audit takes place. Can be one of "SUN", "MON", "TUE", "WED", "THU", "FRI" or "SAT". This field is required if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY".

+ ///

The day of the week on which the scheduled audit takes place. Can be one of "SUN", "MON", "TUE", "WED", "THU", "FRI", or "SAT". This field is required if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY".

#[serde(rename = "dayOfWeek")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_week: Option, - ///

How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", "BIWEEKLY" or "MONTHLY". The actual start time of each audit is determined by the system.

+ ///

How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", "BIWEEKLY" or "MONTHLY". The start time of each audit is determined by the system.

#[serde(rename = "frequency")] pub frequency: String, ///

The name you want to give to the scheduled audit. (Max. 128 chars)

#[serde(rename = "scheduledAuditName")] pub scheduled_audit_name: String, - ///

Metadata which can be used to manage the scheduled audit.

+ ///

Metadata that can be used to manage the scheduled audit.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, - ///

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)

+ ///

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration to select which checks are enabled.)

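As a usage sketch for this request type: a weekly audit on Mondays running one enabled check (the check name must already be enabled through UpdateAccountAuditConfiguration; method name assumed from the operation name):

```rust
use rusoto_core::Region;
use rusoto_iot::{CreateScheduledAuditRequest, Iot, IotClient};

fn schedule_weekly_audit() {
    let client = IotClient::new(Region::UsEast1);
    let request = CreateScheduledAuditRequest {
        frequency: "WEEKLY".to_owned(),
        day_of_week: Some("MON".to_owned()),
        scheduled_audit_name: "weekly-device-audit".to_owned(),
        target_check_names: vec!["LOGGING_DISABLED_CHECK".to_owned()],
        // day_of_month and tags stay at their defaults (None).
        ..Default::default()
    };
    let _ = client.create_scheduled_audit(request).sync();
}
```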
#[serde(rename = "targetCheckNames")] pub target_check_names: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateScheduledAuditResponse { ///

The ARN of the scheduled audit.

#[serde(rename = "scheduledAuditArn")] @@ -1395,7 +1528,7 @@ pub struct CreateScheduledAuditResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateSecurityProfileRequest { - ///

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.

+ ///

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

#[serde(rename = "additionalMetricsToRetain")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_metrics_to_retain: Option>, @@ -1414,14 +1547,14 @@ pub struct CreateSecurityProfileRequest { ///

The name you are giving to the security profile.

#[serde(rename = "securityProfileName")] pub security_profile_name: String, - ///

Metadata which can be used to manage the security profile.

+ ///

Metadata that can be used to manage the security profile.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSecurityProfileResponse { ///

The ARN of the security profile.

#[serde(rename = "securityProfileArn")] @@ -1455,7 +1588,7 @@ pub struct CreateStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStreamResponse { ///

A description of the stream.

#[serde(rename = "description")] @@ -1495,7 +1628,7 @@ pub struct CreateThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateThingGroupResponse { ///

The thing group ARN.

#[serde(rename = "thingGroupArn")] @@ -1533,7 +1666,7 @@ pub struct CreateThingRequest { ///

The output of the CreateThing operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateThingResponse { ///

The ARN of the new thing.

#[serde(rename = "thingArn")] @@ -1567,7 +1700,7 @@ pub struct CreateThingTypeRequest { ///

The output of the CreateThingType operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateThingTypeResponse { ///

The Amazon Resource Name (ARN) of the thing type.

#[serde(rename = "thingTypeArn")] @@ -1628,7 +1761,7 @@ pub struct DeleteAccountAuditConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAccountAuditConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1639,7 +1772,7 @@ pub struct DeleteAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAuthorizerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1654,7 +1787,7 @@ pub struct DeleteBillingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBillingGroupResponse {} ///

Input for the DeleteCACertificate operation.

@@ -1667,7 +1800,7 @@ pub struct DeleteCACertificateRequest { ///

The output for the DeleteCACertificate operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCACertificateResponse {} ///

The input for the DeleteCertificate operation.

@@ -1676,7 +1809,7 @@ pub struct DeleteCertificateRequest { ///

The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)

#[serde(rename = "certificateId")] pub certificate_id: String, - ///

Forces a certificate request to be deleted.

+ ///

Forces the deletion of a certificate if it is inactive and is not attached to an IoT thing.

#[serde(rename = "forceDelete")] #[serde(skip_serializing_if = "Option::is_none")] pub force_delete: Option, @@ -1694,7 +1827,7 @@ pub struct DeleteDynamicThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDynamicThingGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1725,6 +1858,17 @@ pub struct DeleteJobRequest { pub job_id: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteMitigationActionRequest { + ///

The name of the mitigation action that you want to delete.

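Deleting is symmetric; a sketch assuming a corresponding generated `delete_mitigation_action` method:

```rust
use rusoto_core::Region;
use rusoto_iot::{DeleteMitigationActionRequest, Iot, IotClient};

fn delete_action(action_name: &str) {
    let client = IotClient::new(Region::UsEast1);
    let request = DeleteMitigationActionRequest {
        action_name: action_name.to_owned(),
    };
    // DeleteMitigationActionResponse is empty; an Ok result means it is gone.
    let _ = client.delete_mitigation_action(request).sync();
}
```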
+ #[serde(rename = "actionName")] + pub action_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteMitigationActionResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteOTAUpdateRequest { ///

Specifies if the stream associated with an OTA update should be deleted when the OTA update is deleted.

@@ -1741,7 +1885,7 @@ pub struct DeleteOTAUpdateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteOTAUpdateResponse {} ///

The input for the DeletePolicy operation.

@@ -1769,7 +1913,7 @@ pub struct DeleteRegistrationCodeRequest {} ///

The output for the DeleteRegistrationCode operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRegistrationCodeResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1780,7 +1924,7 @@ pub struct DeleteRoleAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRoleAliasResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1791,12 +1935,12 @@ pub struct DeleteScheduledAuditRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteScheduledAuditResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteSecurityProfileRequest { - ///

The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.

+ ///

The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different from the actual version, a VersionConflictException is thrown.

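The expected-version guard described above works as optimistic locking; a sketch:

```rust
use rusoto_core::Region;
use rusoto_iot::{DeleteSecurityProfileRequest, Iot, IotClient};

fn delete_profile(name: &str, version_seen: i64) {
    let client = IotClient::new(Region::UsEast1);
    let request = DeleteSecurityProfileRequest {
        // If the profile was updated since `version_seen` was read, the
        // service rejects the delete with a VersionConflictException.
        expected_version: Some(version_seen),
        security_profile_name: name.to_owned(),
    };
    if let Err(e) = client.delete_security_profile(request).sync() {
        eprintln!("delete failed (possible version conflict): {}", e);
    }
}
```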
#[serde(rename = "expectedVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub expected_version: Option, @@ -1806,7 +1950,7 @@ pub struct DeleteSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSecurityProfileResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1817,7 +1961,7 @@ pub struct DeleteStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteStreamResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1832,7 +1976,7 @@ pub struct DeleteThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteThingGroupResponse {} ///

The input for the DeleteThing operation.

@@ -1849,7 +1993,7 @@ pub struct DeleteThingRequest { ///

The output of the DeleteThing operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteThingResponse {} ///

The input for the DeleteThingType operation.

@@ -1862,7 +2006,7 @@ pub struct DeleteThingTypeRequest { ///

The output for the DeleteThingType operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteThingTypeResponse {} ///

The input for the DeleteTopicRule operation.

@@ -1885,7 +2029,7 @@ pub struct DeleteV2LoggingLevelRequest { ///

Contains information that denied the authorization.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Denied { /// Information that explicitly denies the authorization.
#[serde(rename = "explicitDeny")] @@ -1911,14 +2055,14 @@ pub struct DeprecateThingTypeRequest { /// The output for the DeprecateThingType operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeprecateThingTypeResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeAccountAuditConfigurationRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAccountAuditConfigurationResponse { /// Which audit checks are enabled and disabled for this account.
#[serde(rename = "auditCheckConfigurations")] @@ -1930,12 +2074,67 @@ pub struct DescribeAccountAuditConfigurationResponse { #[serde(skip_serializing_if = "Option::is_none")] pub audit_notification_target_configurations: Option<::std::collections::HashMap<String, AuditNotificationTarget>>, - /// The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as necessary when performing an audit. On the first call to UpdateAccountAuditConfiguration this parameter is required. + /// The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates, and other items as required when performing an audit. On the first call to UpdateAccountAuditConfiguration, this parameter is required.
#[serde(rename = "roleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeAuditFindingRequest { + ///

A unique identifier for a single audit finding. You can use this identifier to apply mitigation actions to the finding.

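A sketch of fetching one finding by the identifier introduced in this diff (method name assumed from the operation name):

```rust
use rusoto_core::Region;
use rusoto_iot::{DescribeAuditFindingRequest, Iot, IotClient};

fn show_finding(finding_id: &str) {
    let client = IotClient::new(Region::UsEast1);
    let request = DescribeAuditFindingRequest {
        finding_id: finding_id.to_owned(),
    };
    if let Ok(response) = client.describe_audit_finding(request).sync() {
        if let Some(finding) = response.finding {
            // All fields on the deserialized AuditFinding are optional.
            println!("check: {:?} severity: {:?}", finding.check_name, finding.severity);
        }
    }
}
```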
+ #[serde(rename = "findingId")] + pub finding_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeAuditFindingResponse { + #[serde(rename = "finding")] + #[serde(skip_serializing_if = "Option::is_none")] + pub finding: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeAuditMitigationActionsTaskRequest { + ///

The unique identifier for the audit mitigation task.

+ #[serde(rename = "taskId")] + pub task_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeAuditMitigationActionsTaskResponse { + ///

Specifies the mitigation actions and their parameters that are applied as part of this task.

+ #[serde(rename = "actionsDefinition")] + #[serde(skip_serializing_if = "Option::is_none")] + pub actions_definition: Option>, + ///

Specifies the mitigation actions that should be applied to specific audit checks.

+ #[serde(rename = "auditCheckToActionsMapping")] + #[serde(skip_serializing_if = "Option::is_none")] + pub audit_check_to_actions_mapping: Option<::std::collections::HashMap>>, + ///

The date and time when the task was completed or canceled.

+ #[serde(rename = "endTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub end_time: Option, + ///

The date and time when the task was started.

+ #[serde(rename = "startTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_time: Option, + ///

Identifies the findings to which the mitigation actions are applied. This can be by audit checks, by audit task, or a set of findings.

+ #[serde(rename = "target")] + #[serde(skip_serializing_if = "Option::is_none")] + pub target: Option, + ///

Aggregate counts of the results when the mitigation tasks were applied to the findings for this audit mitigation actions task.

+ #[serde(rename = "taskStatistics")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_statistics: Option<::std::collections::HashMap>, + ///

The current status of the task.

+ #[serde(rename = "taskStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub task_status: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeAuditTaskRequest { ///

The ID of the audit whose information you want to get.

@@ -1944,7 +2143,7 @@ pub struct DescribeAuditTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAuditTaskResponse { ///

Detailed information about each check performed during this audit.

#[serde(rename = "auditDetails")] @@ -1980,7 +2179,7 @@ pub struct DescribeAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAuthorizerResponse { ///

The authorizer description.

#[serde(rename = "authorizerDescription")] @@ -1996,7 +2195,7 @@ pub struct DescribeBillingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBillingGroupResponse { ///

The ARN of the billing group.

#[serde(rename = "billingGroupArn")] @@ -2034,7 +2233,7 @@ pub struct DescribeCACertificateRequest { ///

The output from the DescribeCACertificate operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCACertificateResponse { ///

The CA certificate description.

#[serde(rename = "certificateDescription")] @@ -2056,7 +2255,7 @@ pub struct DescribeCertificateRequest { ///

The output of the DescribeCertificate operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCertificateResponse { ///

The description of the certificate.

#[serde(rename = "certificateDescription")] @@ -2068,7 +2267,7 @@ pub struct DescribeCertificateResponse { pub struct DescribeDefaultAuthorizerRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDefaultAuthorizerResponse { ///

The default authorizer's description.

#[serde(rename = "authorizerDescription")] @@ -2087,7 +2286,7 @@ pub struct DescribeEndpointRequest { ///

The output from the DescribeEndpoint operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointResponse { ///

The endpoint. The format of the endpoint is as follows: identifier.iot.region.amazonaws.com.

#[serde(rename = "endpointAddress")] @@ -2099,7 +2298,7 @@ pub struct DescribeEndpointResponse { pub struct DescribeEventConfigurationsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEventConfigurationsResponse { ///

The creation date of the event configuration.

#[serde(rename = "creationDate")] @@ -2123,7 +2322,7 @@ pub struct DescribeIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIndexResponse { ///

The index name.

#[serde(rename = "indexName")] @@ -2154,7 +2353,7 @@ pub struct DescribeJobExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobExecutionResponse { ///

Information about the job execution.

#[serde(rename = "execution")] @@ -2170,7 +2369,7 @@ pub struct DescribeJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobResponse { ///

An S3 link to the job document.

#[serde(rename = "documentSource")] @@ -2182,6 +2381,50 @@ pub struct DescribeJobResponse { pub job: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeMitigationActionRequest { + ///

The friendly name that uniquely identifies the mitigation action.

+ #[serde(rename = "actionName")] + pub action_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeMitigationActionResponse { + ///

The ARN that identifies this migration action.

+ #[serde(rename = "actionArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_arn: Option, + ///

A unique identifier for this action.

+ #[serde(rename = "actionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_id: Option, + ///

The friendly name that uniquely identifies the mitigation action.

+ #[serde(rename = "actionName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_name: Option, + ///

Parameters that control how the mitigation action is applied, specific to the type of mitigation action.

+ #[serde(rename = "actionParams")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_params: Option, + ///

The type of mitigation action.

+ #[serde(rename = "actionType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_type: Option, + ///

The date and time when the mitigation action was added to your AWS account.

+ #[serde(rename = "creationDate")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_date: Option, + ///

The date and time when the mitigation action was last changed.

+ #[serde(rename = "lastModifiedDate")] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified_date: Option, + ///

The ARN of the IAM role used to apply this action.

+ #[serde(rename = "roleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role_arn: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeRoleAliasRequest { ///

The role alias to describe.

@@ -2190,7 +2433,7 @@ pub struct DescribeRoleAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRoleAliasResponse { ///

The role alias description.

#[serde(rename = "roleAliasDescription")] @@ -2206,17 +2449,17 @@ pub struct DescribeScheduledAuditRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScheduledAuditResponse { ///

The day of the month on which the scheduled audit takes place. Will be "1" through "31" or "LAST". If days 29-31 are specified, and the month does not have that many days, the audit takes place on the "LAST" day of the month.

#[serde(rename = "dayOfMonth")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_month: Option, - ///

The day of the week on which the scheduled audit takes place. One of "SUN", "MON", "TUE", "WED", "THU", "FRI" or "SAT".

+ ///

The day of the week on which the scheduled audit takes place. One of "SUN", "MON", "TUE", "WED", "THU", "FRI", or "SAT".

#[serde(rename = "dayOfWeek")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_week: Option, - ///

How often the scheduled audit takes place. One of "DAILY", "WEEKLY", "BIWEEKLY" or "MONTHLY". The actual start time of each audit is determined by the system.

+ ///

How often the scheduled audit takes place. One of "DAILY", "WEEKLY", "BIWEEKLY", or "MONTHLY". The start time of each audit is determined by the system.

#[serde(rename = "frequency")] #[serde(skip_serializing_if = "Option::is_none")] pub frequency: Option, @@ -2228,7 +2471,7 @@ pub struct DescribeScheduledAuditResponse { #[serde(rename = "scheduledAuditName")] #[serde(skip_serializing_if = "Option::is_none")] pub scheduled_audit_name: Option, - ///

Which checks are performed during the scheduled audit. (Note that checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)

+ ///

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration to select which checks are enabled.)

#[serde(rename = "targetCheckNames")] #[serde(skip_serializing_if = "Option::is_none")] pub target_check_names: Option>, @@ -2242,9 +2485,9 @@ pub struct DescribeSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSecurityProfileResponse { - ///

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.

+ ///

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

#[serde(rename = "additionalMetricsToRetain")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_metrics_to_retain: Option>, @@ -2290,7 +2533,7 @@ pub struct DescribeStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamResponse { ///

Information about the stream.

#[serde(rename = "streamInfo")] @@ -2306,7 +2549,7 @@ pub struct DescribeThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeThingGroupResponse { ///

The dynamic thing group index name.

#[serde(rename = "indexName")] @@ -2358,7 +2601,7 @@ pub struct DescribeThingRegistrationTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeThingRegistrationTaskResponse { ///

The task creation date.

#[serde(rename = "creationDate")] @@ -2420,7 +2663,7 @@ pub struct DescribeThingRequest { ///

The output from the DescribeThing operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeThingResponse { ///

The thing attributes.

#[serde(rename = "attributes")] @@ -2466,7 +2709,7 @@ pub struct DescribeThingTypeRequest { ///

The output for the DescribeThingType operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeThingTypeResponse { /// The thing type ARN.
#[serde(rename = "thingTypeArn")] @@ -2531,7 +2774,7 @@ pub struct DetachSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachSecurityProfileResponse {} /// The input for the DetachThingPrincipal operation.
@@ -2547,7 +2790,7 @@ pub struct DetachThingPrincipalRequest { /// The output from the DetachThingPrincipal operation.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachThingPrincipalResponse {} /// The input for the DisableTopicRuleRequest operation.
@@ -2612,7 +2855,7 @@ pub struct DynamoDBv2Action { /// The policy that has the effect on the authorization results.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EffectivePolicy { /// The policy ARN.
#[serde(rename = "policyArn")] @@ -2648,6 +2891,17 @@ pub struct ElasticsearchAction { pub type_: String, } +/// Parameters used when defining a mitigation action that enable AWS IoT logging.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EnableIoTLoggingParams { + /// Specifies the types of information to be logged.
+ #[serde(rename = "logLevel")] + pub log_level: String, + /// The ARN of the IAM role used for logging.
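`EnableIoTLoggingParams` derives both `Serialize` and `Deserialize` unconditionally, so a serde_json round-trip shows the wire names set by the renames above (a sketch; assumes a dev-dependency on `serde_json`):

```rust
use rusoto_iot::EnableIoTLoggingParams;

fn round_trip() {
    let params = EnableIoTLoggingParams {
        log_level: "ERROR".to_owned(),
        role_arn_for_logging: "arn:aws:iam::123456789012:role/iot-logging".to_owned(),
    };
    // Serializes with the renamed keys "logLevel" and "roleArnForLogging".
    let json = serde_json::to_string(&params).unwrap();
    assert!(json.contains("\"logLevel\":\"ERROR\""));
    let back: EnableIoTLoggingParams = serde_json::from_str(&json).unwrap();
    assert_eq!(back, params);
}
```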
+ #[serde(rename = "roleArnForLogging")] + pub role_arn_for_logging: String, +} + ///

The input for the EnableTopicRuleRequest operation.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct EnableTopicRuleRequest { @@ -2658,7 +2912,7 @@ pub struct EnableTopicRuleRequest { ///

Error information.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorInfo { ///

The error code.

#[serde(rename = "code")] @@ -2672,7 +2926,7 @@ pub struct ErrorInfo { ///

Information that explicitly denies authorization.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExplicitDeny { ///

The policies that denied the authorization.

#[serde(rename = "policies")] @@ -2739,7 +2993,7 @@ pub struct GetEffectivePoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEffectivePoliciesResponse { ///

The effective policies.

#[serde(rename = "effectivePolicies")] @@ -2751,7 +3005,7 @@ pub struct GetEffectivePoliciesResponse { pub struct GetIndexingConfigurationRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIndexingConfigurationResponse { ///

The index configuration.

#[serde(rename = "thingGroupIndexingConfiguration")] @@ -2771,7 +3025,7 @@ pub struct GetJobDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobDocumentResponse { ///

The job document content.

#[serde(rename = "document")] @@ -2785,7 +3039,7 @@ pub struct GetLoggingOptionsRequest {} ///

The output from the GetLoggingOptions operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoggingOptionsResponse { ///

The logging level.

#[serde(rename = "logLevel")] @@ -2805,7 +3059,7 @@ pub struct GetOTAUpdateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOTAUpdateResponse { ///

The OTA update info.

#[serde(rename = "otaUpdateInfo")] @@ -2823,7 +3077,7 @@ pub struct GetPolicyRequest { ///

The output from the GetPolicy operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPolicyResponse { ///

The date the policy was created.

#[serde(rename = "creationDate")] @@ -2868,7 +3122,7 @@ pub struct GetPolicyVersionRequest { ///

The output from the GetPolicyVersion operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPolicyVersionResponse { ///

The date the policy version was created.

#[serde(rename = "creationDate")] @@ -2910,7 +3164,7 @@ pub struct GetRegistrationCodeRequest {} ///

The output from the GetRegistrationCode operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegistrationCodeResponse { ///

The CA certificate registration code.

#[serde(rename = "registrationCode")] @@ -2938,7 +3192,7 @@ pub struct GetStatisticsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetStatisticsResponse { ///

The statistics returned by the Fleet Indexing service based on the query and aggregation field.

#[serde(rename = "statistics")] @@ -2956,7 +3210,7 @@ pub struct GetTopicRuleRequest { ///

The output from the GetTopicRule operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTopicRuleResponse { ///

The rule.

#[serde(rename = "rule")] @@ -2972,7 +3226,7 @@ pub struct GetTopicRuleResponse { pub struct GetV2LoggingOptionsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetV2LoggingOptionsResponse { ///

The default log level.

#[serde(rename = "defaultLogLevel")] @@ -2990,7 +3244,7 @@ pub struct GetV2LoggingOptionsResponse { ///

The name and ARN of a group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupNameAndArn { ///

The group ARN.

#[serde(rename = "groupArn")] @@ -3004,7 +3258,7 @@ pub struct GroupNameAndArn { ///

Information that implicitly denies authorization. When policy doesn't explicitly deny or allow an action on a resource it is considered an implicit deny.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImplicitDeny { ///

Policies that don't contain a matching allow or deny statement for the specified action on the specified resource.

#[serde(rename = "policies")] @@ -3012,7 +3266,7 @@ pub struct ImplicitDeny { pub policies: Option>, } -///

Sends messge data to an AWS IoT Analytics channel.

+///

Sends message data to an AWS IoT Analytics channel.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct IotAnalyticsAction { /// (deprecated) The ARN of the IoT Analytics channel to which message data will be sent.
@@ -3046,7 +3300,7 @@ pub struct IotEventsAction { /// The Job object contains details about a job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Job { /// Configuration for criteria to abort the job.
#[serde(rename = "abortConfig")] @@ -3120,7 +3374,7 @@ pub struct Job { /// The job execution object represents the execution of a job on a particular device.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecution { /// The estimated number of seconds that remain before the job execution status will be changed to TIMED_OUT. The timeout interval can be anywhere between 1 minute and 7 days (1 to 10080 minutes). The actual job execution timeout can occur up to 60 seconds later than the estimated duration. This value will not be included if the job execution has reached a terminal status.
#[serde(rename = "approximateSecondsBeforeTimedOut")] @@ -3170,7 +3424,7 @@ pub struct JobExecution { ///

Details of the job execution status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionStatusDetails { ///

The job execution status.

#[serde(rename = "detailsMap")] @@ -3180,7 +3434,7 @@ pub struct JobExecutionStatusDetails { ///

The job execution summary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionSummary { ///

A string (consisting of the digits "0" through "9") which identifies this particular job execution on this particular device. It can be used later in commands which return or update job execution information.

#[serde(rename = "executionNumber")] @@ -3206,7 +3460,7 @@ pub struct JobExecutionSummary { ///

Contains a summary of information about job executions for a specific job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionSummaryForJob { ///

Contains a subset of information about a job execution.

#[serde(rename = "jobExecutionSummary")] @@ -3220,7 +3474,7 @@ pub struct JobExecutionSummaryForJob { ///

The job execution summary for a thing.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobExecutionSummaryForThing { ///

Contains a subset of information about a job execution.

#[serde(rename = "jobExecutionSummary")] @@ -3247,7 +3501,7 @@ pub struct JobExecutionsRolloutConfig { ///

The job process details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobProcessDetails { ///

The number of things that cancelled the job.

#[serde(rename = "numberOfCanceledThings")] @@ -3289,7 +3543,7 @@ pub struct JobProcessDetails { ///

The job summary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobSummary { /// The time, in seconds since the epoch, when the job completed.
#[serde(rename = "completedAt")] @@ -3327,7 +3581,7 @@ pub struct JobSummary { /// Describes a key pair.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyPair { /// The private key.
#[serde(rename = "PrivateKey")] @@ -3383,7 +3637,7 @@ pub struct ListActiveViolationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActiveViolationsResponse { /// The list of active violations.
#[serde(rename = "activeViolations")] @@ -3409,13 +3663,13 @@ pub struct ListAttachedPoliciesRequest { #[serde(rename = "recursive")] #[serde(skip_serializing_if = "Option::is_none")] pub recursive: Option<bool>, - /// The group for which the policies will be listed. + /// The group or principal for which the policies will be listed.
#[serde(rename = "target")] pub target: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAttachedPoliciesResponse { /// The token to retrieve the next set of results, or ``null`` if there are no more results.
#[serde(rename = "nextMarker")] @@ -3445,7 +3699,7 @@ pub struct ListAuditFindingsRequest { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, - /// Information identifying the non-compliant resource. + /// Information identifying the noncompliant resource.
#[serde(rename = "resourceIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_identifier: Option<ResourceIdentifier>, @@ -3460,7 +3714,7 @@ pub struct ListAuditFindingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAuditFindingsResponse { /// The findings (results) of the audit.
     #[serde(rename = "findings")]
@@ -3472,6 +3726,84 @@ pub struct ListAuditFindingsResponse {
     pub next_token: Option<String>,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListAuditMitigationActionsExecutionsRequest {
+    /// <p>Specify this filter to limit results to those with a specific status.</p>
+    #[serde(rename = "actionStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_status: Option<String>,
+    /// <p>Specify this filter to limit results to those that were applied to a specific audit finding.</p>
+    #[serde(rename = "findingId")]
+    pub finding_id: String,
+    /// <p>The maximum number of results to return at one time. The default is 25.</p>
+    #[serde(rename = "maxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>Specify this filter to limit results to actions for a specific audit mitigation actions task.</p>
+    #[serde(rename = "taskId")]
+    pub task_id: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListAuditMitigationActionsExecutionsResponse {
+    /// <p>A set of task execution results based on the input parameters. Details include the mitigation action applied, start time, and task status.</p>
+    #[serde(rename = "actionsExecutions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub actions_executions: Option<Vec<AuditMitigationActionExecutionMetadata>>,
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListAuditMitigationActionsTasksRequest {
+    /// <p>Specify this filter to limit results to tasks that were applied to results for a specific audit.</p>
+    #[serde(rename = "auditTaskId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub audit_task_id: Option<String>,
+    /// <p>Specify this filter to limit results to tasks that were completed or canceled on or before a specific date and time.</p>
+    #[serde(rename = "endTime")]
+    pub end_time: f64,
+    /// <p>Specify this filter to limit results to tasks that were applied to a specific audit finding.</p>
+    #[serde(rename = "findingId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub finding_id: Option<String>,
+    /// <p>The maximum number of results to return at one time. The default is 25.</p>
+    #[serde(rename = "maxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>Specify this filter to limit results to tasks that began on or after a specific date and time.</p>
+    #[serde(rename = "startTime")]
+    pub start_time: f64,
+    /// <p>Specify this filter to limit results to tasks that are in a specific state.</p>
+    #[serde(rename = "taskStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub task_status: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListAuditMitigationActionsTasksResponse {
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>The collection of audit mitigation tasks that matched the filter criteria.</p>
+    #[serde(rename = "tasks")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tasks: Option<Vec<AuditMitigationActionsTaskMetadata>>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListAuditTasksRequest {
     /// <p>The end of the time period.</p>
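// Usage sketch: with the new `serialize_structs` feature enabled (for
// example `rusoto_iot = { version = "*", features = ["serialize_structs"] }`
// in Cargo.toml plus a `serde_json` dependency), response structs such as
// ListAuditMitigationActionsTasksResponse derive Serialize and can be dumped
// as JSON. The trait method name and the futures-0.1-era `.sync()` call are
// assumed from rusoto's usual codegen conventions; the epoch timestamps are
// placeholders.
use rusoto_core::Region;
use rusoto_iot::{Iot, IotClient, ListAuditMitigationActionsTasksRequest};

fn dump_recent_tasks() -> Result<(), Box<dyn std::error::Error>> {
    let client = IotClient::new(Region::UsEast1);
    let response = client
        .list_audit_mitigation_actions_tasks(ListAuditMitigationActionsTasksRequest {
            start_time: 1_546_300_800.0, // placeholder window start (seconds since epoch)
            end_time: 1_546_387_200.0,   // placeholder window end
            ..Default::default()
        })
        .sync()?;
    // Compiles only when `serialize_structs` (or cfg(test)) enables Serialize:
    println!("{}", serde_json::to_string_pretty(&response)?);
    Ok(())
}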

@@ -3485,10 +3817,10 @@ pub struct ListAuditTasksRequest { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The beginning of the time period. Note that audit information is retained for a limited time (180 days). Requesting a start time prior to what is retained results in an "InvalidRequestException".

+ ///

The beginning of the time period. Audit information is retained for a limited time (180 days). Requesting a start time prior to what is retained results in an "InvalidRequestException".

#[serde(rename = "startTime")] pub start_time: f64, - ///

A filter to limit the output to audits with the specified completion status: can be one of "IN_PROGRESS", "COMPLETED", "FAILED" or "CANCELED".

+ ///

A filter to limit the output to audits with the specified completion status: can be one of "IN_PROGRESS", "COMPLETED", "FAILED", or "CANCELED".

#[serde(rename = "taskStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub task_status: Option, @@ -3499,7 +3831,7 @@ pub struct ListAuditTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAuditTasksResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -3532,7 +3864,7 @@ pub struct ListAuthorizersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAuthorizersResponse { ///

The authorizers.

#[serde(rename = "authorizers")] @@ -3561,7 +3893,7 @@ pub struct ListBillingGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBillingGroupsResponse { ///

The list of billing groups.

#[serde(rename = "billingGroups")] @@ -3592,7 +3924,7 @@ pub struct ListCACertificatesRequest { ///

The output from the ListCACertificates operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCACertificatesResponse { ///

The CA certificates registered in your AWS account.

#[serde(rename = "certificates")] @@ -3626,7 +3958,7 @@ pub struct ListCertificatesByCARequest { ///

The output of the ListCertificatesByCA operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCertificatesByCAResponse { ///

The device certificates signed by the specified CA certificate.

#[serde(rename = "certificates")] @@ -3657,7 +3989,7 @@ pub struct ListCertificatesRequest { ///

The output of the ListCertificates operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCertificatesResponse { ///

The descriptions of the certificates.

#[serde(rename = "certificates")] @@ -3682,7 +4014,7 @@ pub struct ListIndicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIndicesResponse { ///

The index names.

#[serde(rename = "indexNames")] @@ -3714,7 +4046,7 @@ pub struct ListJobExecutionsForJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobExecutionsForJobResponse { ///

A list of job execution summaries.

#[serde(rename = "executionSummaries")] @@ -3746,7 +4078,7 @@ pub struct ListJobExecutionsForThingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobExecutionsForThingResponse { ///

A list of job execution summaries.

#[serde(rename = "executionSummaries")] @@ -3787,7 +4119,7 @@ pub struct ListJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResponse { ///

A list of jobs.

     #[serde(rename = "jobs")]
@@ -3799,6 +4131,35 @@ pub struct ListJobsResponse {
     pub next_token: Option<String>,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListMitigationActionsRequest {
+    /// <p>Specify a value to limit the result to mitigation actions with a specific action type.</p>
+    #[serde(rename = "actionType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_type: Option<String>,
+    /// <p>The maximum number of results to return at one time. The default is 25.</p>
+    #[serde(rename = "maxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListMitigationActionsResponse {
+    /// <p>A set of actions that matched the specified filter criteria.</p>
+    #[serde(rename = "actionIdentifiers")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_identifiers: Option<Vec<MitigationActionIdentifier>>,
+    /// <p>The token for the next set of results.</p>
+    #[serde(rename = "nextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListOTAUpdatesRequest {
     /// <p>The maximum number of results to return at one time.</p>
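// Usage sketch: draining every page of ListMitigationActions by feeding each
// response's next_token back into the following request. The trait method
// name and `.sync()` are assumed from rusoto's codegen conventions, and the
// action-type filter value is a placeholder.
use rusoto_iot::{Iot, IotClient, ListMitigationActionsRequest};

fn all_action_names(client: &IotClient) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let mut names = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let response = client
            .list_mitigation_actions(ListMitigationActionsRequest {
                action_type: Some("UPDATE_DEVICE_CERTIFICATE".to_string()), // placeholder filter
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()?;
        names.extend(
            response
                .action_identifiers
                .unwrap_or_default()
                .into_iter()
                .filter_map(|identifier| identifier.action_name),
        );
        next_token = response.next_token;
        if next_token.is_none() {
            break; // no further pages
        }
    }
    Ok(names)
}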

@@ -3816,7 +4177,7 @@ pub struct ListOTAUpdatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOTAUpdatesResponse { ///

A token to use to get the next set of results.

#[serde(rename = "nextToken")] @@ -3847,7 +4208,7 @@ pub struct ListOutgoingCertificatesRequest { ///

The output from the ListOutgoingCertificates operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOutgoingCertificatesResponse { ///

The marker for the next set of results.

#[serde(rename = "nextMarker")] @@ -3878,7 +4239,7 @@ pub struct ListPoliciesRequest { ///

The output from the ListPolicies operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPoliciesResponse { ///

The marker for the next set of results, or null if there are no additional results.

#[serde(rename = "nextMarker")] @@ -3912,7 +4273,7 @@ pub struct ListPolicyPrincipalsRequest { ///

The output from the ListPolicyPrincipals operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPolicyPrincipalsResponse { ///

The marker for the next set of results, or null if there are no additional results.

#[serde(rename = "nextMarker")] @@ -3934,7 +4295,7 @@ pub struct ListPolicyVersionsRequest { ///

The output from the ListPolicyVersions operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPolicyVersionsResponse { ///

The policy versions.

#[serde(rename = "policyVersions")] @@ -3964,7 +4325,7 @@ pub struct ListPrincipalPoliciesRequest { ///

The output from the ListPrincipalPolicies operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPrincipalPoliciesResponse { ///

The marker for the next set of results, or null if there are no additional results.

#[serde(rename = "nextMarker")] @@ -3994,7 +4355,7 @@ pub struct ListPrincipalThingsRequest { ///

The output from the ListPrincipalThings operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPrincipalThingsResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4023,7 +4384,7 @@ pub struct ListRoleAliasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRoleAliasesResponse { ///

A marker used to get the next set of results.

#[serde(rename = "nextMarker")] @@ -4048,7 +4409,7 @@ pub struct ListScheduledAuditsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListScheduledAuditsResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4070,7 +4431,7 @@ pub struct ListSecurityProfilesForTargetRequest { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

If true, return child groups as well.

+ ///

If true, return child groups too.

#[serde(rename = "recursive")] #[serde(skip_serializing_if = "Option::is_none")] pub recursive: Option, @@ -4080,7 +4441,7 @@ pub struct ListSecurityProfilesForTargetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecurityProfilesForTargetResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4105,7 +4466,7 @@ pub struct ListSecurityProfilesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecurityProfilesResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4134,7 +4495,7 @@ pub struct ListStreamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamsResponse { ///

A token used to get the next set of results.

#[serde(rename = "nextToken")] @@ -4158,7 +4519,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4186,7 +4547,7 @@ pub struct ListTargetsForPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTargetsForPolicyResponse { ///

A marker used to get the next set of results.

#[serde(rename = "nextMarker")] @@ -4214,7 +4575,7 @@ pub struct ListTargetsForSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTargetsForSecurityProfileResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4242,7 +4603,7 @@ pub struct ListThingGroupsForThingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingGroupsForThingResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4279,7 +4640,7 @@ pub struct ListThingGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingGroupsResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4301,7 +4662,7 @@ pub struct ListThingPrincipalsRequest { ///

The output from the ListThingPrincipals operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingPrincipalsResponse { ///

The principals associated with the thing.

#[serde(rename = "principals")] @@ -4328,7 +4689,7 @@ pub struct ListThingRegistrationTaskReportsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingRegistrationTaskReportsResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4361,7 +4722,7 @@ pub struct ListThingRegistrationTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingRegistrationTasksResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4392,7 +4753,7 @@ pub struct ListThingTypesRequest { ///

The output for the ListThingTypes operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingTypesResponse { ///

The token for the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4420,7 +4781,7 @@ pub struct ListThingsInBillingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingsInBillingGroupResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4452,7 +4813,7 @@ pub struct ListThingsInThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingsInThingGroupResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4491,7 +4852,7 @@ pub struct ListThingsRequest { ///

The output from the ListThings operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListThingsResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -4526,7 +4887,7 @@ pub struct ListTopicRulesRequest { ///

The output from the ListTopicRules operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTopicRulesResponse { ///

A token used to retrieve the next value.

#[serde(rename = "nextToken")] @@ -4555,7 +4916,7 @@ pub struct ListV2LoggingLevelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListV2LoggingLevelsResponse { ///

The logging configuration for a target.

#[serde(rename = "logTargetConfigurations")] @@ -4594,13 +4955,13 @@ pub struct ListViolationEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListViolationEventsResponse { ///

A token that can be used to retrieve the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The security profile violation alerts issued for this account during the given time frame, potentially filtered by security profile, behavior violated, or thing (device) violating.

+ ///

The security profile violation alerts issued for this account during the given time period, potentially filtered by security profile, behavior violated, or thing (device) violating.

#[serde(rename = "violationEvents")] #[serde(skip_serializing_if = "Option::is_none")] pub violation_events: Option>, @@ -4620,7 +4981,7 @@ pub struct LogTarget { ///

The target configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogTargetConfiguration { ///

The logging level.

     #[serde(rename = "logLevel")]
@@ -4661,19 +5022,88 @@ pub struct MetricValue {
     pub ports: Option<Vec<i64>>,
 }
 
-/// <p>Information about the resource that was non-compliant with the audit check.</p>
+/// <p>Describes which changes should be applied as part of a mitigation action.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct MitigationAction {
+    /// <p>The set of parameters for this mitigation action. The parameters vary, depending on the kind of action you apply.</p>
+    #[serde(rename = "actionParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_params: Option<MitigationActionParams>,
+    /// <p>A unique identifier for the mitigation action.</p>
+    #[serde(rename = "id")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub id: Option<String>,
+    /// <p>A user-friendly name for the mitigation action.</p>
+    #[serde(rename = "name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// <p>The IAM role ARN used to apply this mitigation action.</p>
+    #[serde(rename = "roleArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub role_arn: Option<String>,
+}
+
+/// <p>Information that identifies a mitigation action. This information is returned by ListMitigationActions.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct MitigationActionIdentifier {
+    /// <p>The ARN of the mitigation action.</p>
+    #[serde(rename = "actionArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_arn: Option<String>,
+    /// <p>The friendly name of the mitigation action.</p>
+    #[serde(rename = "actionName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_name: Option<String>,
+    /// <p>The date when this mitigation action was created.</p>
+    #[serde(rename = "creationDate")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub creation_date: Option<f64>,
+}
+
+/// <p>The set of parameters for this mitigation action. You can specify only one type of parameter (in other words, you can apply only one action for each defined mitigation action).</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct MitigationActionParams {
+    /// <p>Parameters to define a mitigation action that moves devices associated with a certificate to one or more specified thing groups, typically for quarantine.</p>
+    #[serde(rename = "addThingsToThingGroupParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub add_things_to_thing_group_params: Option<AddThingsToThingGroupParams>,
+    /// <p>Parameters to define a mitigation action that enables AWS IoT logging at a specified level of detail.</p>
+    #[serde(rename = "enableIoTLoggingParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enable_io_t_logging_params: Option<EnableIoTLoggingParams>,
+    /// <p>Parameters to define a mitigation action that publishes findings to Amazon SNS. You can implement your own custom actions in response to the Amazon SNS messages.</p>
+    #[serde(rename = "publishFindingToSnsParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub publish_finding_to_sns_params: Option<PublishFindingToSnsParams>,
+    /// <p>Parameters to define a mitigation action that adds a blank policy to restrict permissions.</p>
+    #[serde(rename = "replaceDefaultPolicyVersionParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub replace_default_policy_version_params: Option<ReplaceDefaultPolicyVersionParams>,
+    /// <p>Parameters to define a mitigation action that changes the state of the CA certificate to inactive.</p>
+    #[serde(rename = "updateCACertificateParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub update_ca_certificate_params: Option<UpdateCACertificateParams>,
+    /// <p>Parameters to define a mitigation action that changes the state of the device certificate to inactive.</p>
+    #[serde(rename = "updateDeviceCertificateParams")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub update_device_certificate_params: Option<UpdateDeviceCertificateParams>,
+}
+
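// Usage sketch: MitigationActionParams mirrors a tagged union on the service
// side, so exactly one of its Option fields should be Some. Here the single
// configured action publishes findings to SNS; the topic ARN is a
// placeholder, and PublishFindingToSnsParams is defined later in this file.
fn sns_action_params() -> MitigationActionParams {
    MitigationActionParams {
        publish_finding_to_sns_params: Some(PublishFindingToSnsParams {
            topic_arn: "arn:aws:sns:us-east-1:123456789012:audit-findings".to_string(),
        }),
        // Leave every other action kind unset so only one action is defined.
        ..Default::default()
    }
}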

+/// <p>Information about the resource that was noncompliant with the audit check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NonCompliantResource {
-    /// <p>Additional information about the non-compliant resource.</p>
+    /// <p>Other information about the noncompliant resource.</p>
     #[serde(rename = "additionalInfo")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub additional_info: Option<::std::collections::HashMap<String, String>>,
-    /// <p>Information identifying the non-compliant resource.</p>
+    /// <p>Information that identifies the noncompliant resource.</p>
     #[serde(rename = "resourceIdentifier")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub resource_identifier: Option<ResourceIdentifier>,
-    /// <p>The type of the non-compliant resource.</p>
+    /// <p>The type of the noncompliant resource.</p>
     #[serde(rename = "resourceType")]
    #[serde(skip_serializing_if = "Option::is_none")]
     pub resource_type: Option<String>,
@@ -4706,7 +5136,7 @@ pub struct OTAUpdateFile {
 /// <p>Information about an OTA update.</p>

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OTAUpdateInfo { ///

A collection of name/value pairs.

#[serde(rename = "additionalParameters")] @@ -4768,7 +5198,7 @@ pub struct OTAUpdateInfo { ///

An OTA update summary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OTAUpdateSummary { ///

The date when the OTA update was created.

#[serde(rename = "creationDate")] @@ -4786,7 +5216,7 @@ pub struct OTAUpdateSummary { ///

A certificate that has been transferred but not yet accepted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutgoingCertificate { ///

The certificate ARN.

#[serde(rename = "certificateArn")] @@ -4816,7 +5246,7 @@ pub struct OutgoingCertificate { ///

Describes an AWS IoT policy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Policy { ///

The policy ARN.

#[serde(rename = "policyArn")] @@ -4830,7 +5260,7 @@ pub struct Policy { ///

Describes a policy version.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyVersion { ///

The date and time the policy was created.

     #[serde(rename = "createDate")]
@@ -4872,10 +5302,18 @@ pub struct PresignedUrlConfig {
     pub role_arn: Option<String>,
 }
 
+/// <p>Parameters to define a mitigation action that publishes findings to Amazon SNS. You can implement your own custom actions in response to the Amazon SNS messages.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct PublishFindingToSnsParams {
+    /// <p>The ARN of the topic to which you want to publish the findings.</p>
+    #[serde(rename = "topicArn")]
+    pub topic_arn: String,
+}
+
 /// <p>The input for the DynamoActionVS action that specifies the DynamoDB table to which the message data will be written.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct PutItemInput {
-    /// <p>The table where the message data will be written</p>
+    /// <p>The table where the message data will be written.</p>
     #[serde(rename = "tableName")]
     pub table_name: String,
 }
@@ -4918,7 +5356,7 @@ pub struct RegisterCACertificateRequest {
 /// <p>The output from the RegisterCACertificateResponse operation.</p>

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterCACertificateResponse { ///

The CA certificate ARN.

#[serde(rename = "certificateArn")] @@ -4948,7 +5386,7 @@ pub struct RegisterCertificateRequest { ///

The output from the RegisterCertificate operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterCertificateResponse { ///

The certificate ARN.

#[serde(rename = "certificateArn")] @@ -4972,7 +5410,7 @@ pub struct RegisterThingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterThingResponse { ///

.

#[serde(rename = "certificatePem")] @@ -5011,13 +5449,13 @@ pub struct RejectCertificateTransferRequest { ///

Information about a related resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelatedResource { - ///

Additional information about the resource.

+ ///

Other information about the resource.

#[serde(rename = "additionalInfo")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_info: Option<::std::collections::HashMap>, - ///

Information identifying the resource.

+ ///

Information that identifies the resource.

     #[serde(rename = "resourceIdentifier")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub resource_identifier: Option<ResourceIdentifier>,
@@ -5048,7 +5486,7 @@ pub struct RemoveThingFromBillingGroupRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RemoveThingFromBillingGroupResponse {}
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -5072,9 +5510,17 @@ pub struct RemoveThingFromThingGroupRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RemoveThingFromThingGroupResponse {}
 
+/// <p>Parameters to define a mitigation action that adds a blank policy to restrict permissions.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ReplaceDefaultPolicyVersionParams {
+    /// <p>The name of the template to be applied. The only supported value is BLANK_POLICY.</p>
+    #[serde(rename = "templateName")]
+    pub template_name: String,
+}
+
 /// <p>The input for the ReplaceTopicRule operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ReplaceTopicRuleRequest {
@@ -5089,6 +5535,10 @@ pub struct ReplaceTopicRuleRequest {
 /// <p>Describes an action to republish to another topic.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct RepublishAction {
+    /// <p>The Quality of Service (QoS) level to use when republishing messages.</p>
+    #[serde(rename = "qos")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub qos: Option<i64>,
     /// <p>The ARN of the IAM role that grants access.</p>
     #[serde(rename = "roleArn")]
     pub role_arn: String,
@@ -5097,7 +5547,7 @@ pub struct RepublishAction {
     pub topic: String,
 }
 
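// Usage sketch: the new `qos` field selects the MQTT QoS level for
// republished messages (AWS IoT supports QoS 0 and 1); leaving it None keeps
// the service default of QoS 0. The role ARN and topic are placeholders.
fn republish_at_qos_one() -> RepublishAction {
    RepublishAction {
        qos: Some(1), // at-least-once delivery
        role_arn: "arn:aws:iam::123456789012:role/republish-role".to_string(),
        topic: "audit/republished".to_string(),
    }
}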

-/// <p>Information identifying the non-compliant resource.</p>
+/// <p>Information that identifies the noncompliant resource.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ResourceIdentifier {
     /// <p>The account with which the resource is associated.</p>

@@ -5112,7 +5562,7 @@ pub struct ResourceIdentifier { #[serde(rename = "clientId")] #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option, - ///

The ID of the Cognito Identity Pool.

+ ///

The ID of the Amazon Cognito identity pool.

#[serde(rename = "cognitoIdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub cognito_identity_pool_id: Option, @@ -5128,7 +5578,7 @@ pub struct ResourceIdentifier { ///

Role alias description.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RoleAliasDescription { ///

The UNIX timestamp of when the role alias was created.

#[serde(rename = "creationDate")] @@ -5221,7 +5671,7 @@ pub struct SalesforceAction { ///

Information about the scheduled audit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScheduledAuditMetadata { ///

The day of the month on which the scheduled audit is run (if the frequency is "MONTHLY"). If days 29-31 are specified, and the month does not have that many days, the audit takes place on the "LAST" day of the month.

#[serde(rename = "dayOfMonth")] @@ -5231,7 +5681,7 @@ pub struct ScheduledAuditMetadata { #[serde(rename = "dayOfWeek")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_week: Option, - ///

How often the scheduled audit takes place.

+ ///

How often the scheduled audit occurs.

#[serde(rename = "frequency")] #[serde(skip_serializing_if = "Option::is_none")] pub frequency: Option, @@ -5269,7 +5719,7 @@ pub struct SearchIndexRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchIndexResponse { ///

The token used to get the next set of results, or null if there are no additional results.

#[serde(rename = "nextToken")] @@ -5287,7 +5737,7 @@ pub struct SearchIndexResponse { ///

Identifying information for a Device Defender security profile.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityProfileIdentifier { ///

The ARN of the security profile.

#[serde(rename = "arn")] @@ -5299,7 +5749,7 @@ pub struct SecurityProfileIdentifier { ///

A target to which an alert is sent when a security profile behavior is violated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityProfileTarget { ///

The ARN of the security profile.

#[serde(rename = "arn")] @@ -5308,7 +5758,7 @@ pub struct SecurityProfileTarget { ///

Information about a security profile and the target associated with it.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecurityProfileTargetMapping { ///

Information that identifies the security profile.

#[serde(rename = "securityProfileIdentifier")] @@ -5328,7 +5778,7 @@ pub struct SetDefaultAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SetDefaultAuthorizerResponse { ///

The authorizer ARN.

     #[serde(rename = "authorizerArn")]
@@ -5433,24 +5883,49 @@ pub struct SqsAction {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
-pub struct StartOnDemandAuditTaskRequest {
-    /// <p>Which checks are performed during the audit. The checks you specify must be enabled for your account or an exception occurs. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.</p>
-    #[serde(rename = "targetCheckNames")]
-    pub target_check_names: Vec<String>,
+pub struct StartAuditMitigationActionsTaskRequest {
+    /// <p>For an audit check, specifies which mitigation actions to apply. Those actions must be defined in your AWS account.</p>
+    #[serde(rename = "auditCheckToActionsMapping")]
+    pub audit_check_to_actions_mapping: ::std::collections::HashMap<String, Vec<String>>,
+    /// <p>Each audit mitigation task must have a unique client request token. If you try to start a new task with the same token as a task that already exists, an exception occurs. If you omit this value, a unique client request token is generated automatically.</p>
+    #[serde(rename = "clientRequestToken")]
+    pub client_request_token: String,
+    /// <p>Specifies the audit findings to which the mitigation actions are applied. You can apply them to a type of audit check, to all findings from an audit, or to a specific set of findings.</p>
+    #[serde(rename = "target")]
+    pub target: AuditMitigationActionsTaskTarget,
+    /// <p>A unique identifier for the task. You can use this identifier to check the status of the task or to cancel it.</p>
+    #[serde(rename = "taskId")]
+    pub task_id: String,
+}
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
-pub struct StartOnDemandAuditTaskResponse {
-    /// <p>The ID of the on-demand audit you started.</p>
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StartAuditMitigationActionsTaskResponse {
+    /// <p>The unique identifier for the audit mitigation task. This matches the taskId that you specified in the request.</p>
     #[serde(rename = "taskId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_id: Option<String>,
 }
 
-/// <p>Information required to start a signing job.</p>
-#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct StartSigningJobParameter {
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct StartOnDemandAuditTaskRequest {
+    /// <p>Which checks are performed during the audit. The checks you specify must be enabled for your account or an exception occurs. Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled, or use UpdateAccountAuditConfiguration to select which checks are enabled.</p>
+    #[serde(rename = "targetCheckNames")]
+    pub target_check_names: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StartOnDemandAuditTaskResponse {
+    /// <p>The ID of the on-demand audit you started.</p>
+    #[serde(rename = "taskId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub task_id: Option<String>,
+}
+
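// Usage sketch: starting an audit mitigation actions task that applies one
// named action to every finding a check produced in a given audit. The check
// name, action name, token, and ids are placeholders, and the fields assumed
// on AuditMitigationActionsTaskTarget (defined earlier in this file) follow
// the AWS IoT API shape.
use std::collections::HashMap;

fn start_task_request() -> StartAuditMitigationActionsTaskRequest {
    let mut mapping: HashMap<String, Vec<String>> = HashMap::new();
    mapping.insert(
        "DEVICE_CERTIFICATE_EXPIRING_CHECK".to_string(), // placeholder check name
        vec!["deactivate-expired-certs".to_string()],    // placeholder action name
    );
    StartAuditMitigationActionsTaskRequest {
        audit_check_to_actions_mapping: mapping,
        client_request_token: "unique-client-token-0001".to_string(),
        target: AuditMitigationActionsTaskTarget {
            audit_task_id: Some("example-audit-task-id".to_string()),
            ..Default::default()
        },
        task_id: "remediate-expiring-certs-1".to_string(),
    }
}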

+/// <p>Information required to start a signing job.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct StartSigningJobParameter {
     /// <p>The location to write the code-signed file.</p>

#[serde(rename = "destination")] #[serde(skip_serializing_if = "Option::is_none")] @@ -5482,7 +5957,7 @@ pub struct StartThingRegistrationTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartThingRegistrationTaskResponse { ///

The bulk thing provisioning task ID.

#[serde(rename = "taskId")] @@ -5501,7 +5976,7 @@ pub struct StatisticalThreshold { ///

A map of key-value pairs for all supported statistics. Currently, only count is supported.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Statistics { ///

The count of things that match the query.

#[serde(rename = "count")] @@ -5532,7 +6007,7 @@ pub struct StopThingRegistrationTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopThingRegistrationTaskResponse {} ///

Describes a group of files that can be streamed.

@@ -5563,7 +6038,7 @@ pub struct StreamFile { ///

Information about a stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamInfo { ///

The date when the stream was created.

#[serde(rename = "createdAt")] @@ -5601,7 +6076,7 @@ pub struct StreamInfo { ///

A summary of a stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamSummary { ///

A description of the stream.

#[serde(rename = "description")] @@ -5645,12 +6120,12 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Statistics for the checks performed during the audit.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskStatistics { ///

The number of checks that did not run because the audit was canceled.

#[serde(rename = "canceledChecks")] @@ -5660,7 +6135,7 @@ pub struct TaskStatistics { #[serde(rename = "compliantChecks")] #[serde(skip_serializing_if = "Option::is_none")] pub compliant_checks: Option, - ///

The number of checks

+ ///

The number of checks that failed.

#[serde(rename = "failedChecks")] #[serde(skip_serializing_if = "Option::is_none")] pub failed_checks: Option, @@ -5668,7 +6143,7 @@ pub struct TaskStatistics { #[serde(rename = "inProgressChecks")] #[serde(skip_serializing_if = "Option::is_none")] pub in_progress_checks: Option, - ///

The number of checks that found non-compliant resources.

+ ///

The number of checks that found noncompliant resources.

     #[serde(rename = "nonCompliantChecks")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub non_compliant_checks: Option<i64>,
@@ -5682,6 +6157,32 @@ pub struct TaskStatistics {
     pub waiting_for_data_collection_checks: Option<i64>,
 }
 
+/// <p>Provides summary counts of how many tasks for findings are in a particular state. This information is included in the response from DescribeAuditMitigationActionsTask.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct TaskStatisticsForAuditCheck {
+    /// <p>The number of findings for which the mitigation action task was canceled while being applied.</p>
+    #[serde(rename = "canceledFindingsCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub canceled_findings_count: Option<i64>,
+    /// <p>The number of findings for which at least one of the actions failed when applied.</p>
+    #[serde(rename = "failedFindingsCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failed_findings_count: Option<i64>,
+    /// <p>The number of findings skipped because of filter conditions provided in the parameters to the command.</p>
+    #[serde(rename = "skippedFindingsCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub skipped_findings_count: Option<i64>,
+    /// <p>The number of findings for which all mitigation actions succeeded when applied.</p>
+    #[serde(rename = "succeededFindingsCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub succeeded_findings_count: Option<i64>,
+    /// <p>The total number of findings to which a task is being applied.</p>
+    #[serde(rename = "totalFindingsCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub total_findings_count: Option<i64>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct TestAuthorizationRequest {
     /// <p>A list of authorization info objects. Simulating authorization will create a response for each authInfo object in the list.</p>

@@ -5710,7 +6211,7 @@ pub struct TestAuthorizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestAuthorizationResponse { ///

The authentication results.

#[serde(rename = "authResults")] @@ -5732,7 +6233,7 @@ pub struct TestInvokeAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestInvokeAuthorizerResponse { ///

The number of seconds after which the connection is terminated.

#[serde(rename = "disconnectAfterInSeconds")] @@ -5758,7 +6259,7 @@ pub struct TestInvokeAuthorizerResponse { ///

The properties of the thing, including thing name, thing type name, and a list of thing attributes.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingAttribute { ///

A list of thing attributes which are name-value pairs.

#[serde(rename = "attributes")] @@ -5784,7 +6285,7 @@ pub struct ThingAttribute { ///

The connectivity status of the thing.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingConnectivity { ///

True if the thing is connected to the AWS IoT service; false if it is not connected.

#[serde(rename = "connected")] @@ -5798,7 +6299,7 @@ pub struct ThingConnectivity { ///

The thing search index document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingDocument { ///

The attributes.

#[serde(rename = "attributes")] @@ -5832,7 +6333,7 @@ pub struct ThingDocument { ///

The thing group search index document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingGroupDocument { ///

The thing group attributes.

#[serde(rename = "attributes")] @@ -5866,7 +6367,7 @@ pub struct ThingGroupIndexingConfiguration { ///

Thing group metadata.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingGroupMetadata { ///

The UNIX timestamp of when the thing group was created.

#[serde(rename = "creationDate")] @@ -5909,7 +6410,7 @@ pub struct ThingIndexingConfiguration { ///

The definition of the thing type, including thing type name and description.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingTypeDefinition { ///

The thing type ARN.

#[serde(rename = "thingTypeArn")] @@ -5931,7 +6432,7 @@ pub struct ThingTypeDefinition { ///

The ThingTypeMetadata contains additional information about the thing type, including: the creation date and time, a value indicating whether the thing type is deprecated, and the date and time when it was deprecated.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ThingTypeMetadata { ///

The date and time when the thing type was created.

#[serde(rename = "creationDate")] @@ -5971,7 +6472,7 @@ pub struct TimeoutConfig { ///

Describes a rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TopicRule { ///

The actions associated with the rule.

#[serde(rename = "actions")] @@ -6009,7 +6510,7 @@ pub struct TopicRule { ///

Describes a rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TopicRuleListItem { ///

The date and time the rule was created.

#[serde(rename = "createdAt")] @@ -6077,7 +6578,7 @@ pub struct TransferCertificateRequest { ///

The output from the TransferCertificate operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransferCertificateResponse { ///

The ARN of the certificate.

#[serde(rename = "transferredCertificateArn")] @@ -6087,7 +6588,7 @@ pub struct TransferCertificateResponse { ///

Data used to transfer a certificate to an AWS account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransferData { ///

The date the transfer was accepted.

#[serde(rename = "acceptDate")] @@ -6122,12 +6623,12 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateAccountAuditConfigurationRequest { - ///

Specifies which audit checks are enabled and disabled for this account. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are currently enabled.

Note that some data collection may begin immediately when certain checks are enabled. When a check is disabled, any data collected so far in relation to the check is deleted.

You cannot disable a check if it is used by any scheduled audit. You must first delete the check from the scheduled audit or delete the scheduled audit itself.

On the first call to UpdateAccountAuditConfiguration this parameter is required and must specify at least one enabled check.

+ ///

Specifies which audit checks are enabled and disabled for this account. Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are currently enabled.

Some data collection might start immediately when certain checks are enabled. When a check is disabled, any data collected so far in relation to the check is deleted.

You cannot disable a check if it is used by any scheduled audit. You must first delete the check from the scheduled audit or delete the scheduled audit itself.

On the first call to UpdateAccountAuditConfiguration, this parameter is required and must specify at least one enabled check.

#[serde(rename = "auditCheckConfigurations")] #[serde(skip_serializing_if = "Option::is_none")] pub audit_check_configurations: @@ -6137,14 +6638,14 @@ pub struct UpdateAccountAuditConfigurationRequest { #[serde(skip_serializing_if = "Option::is_none")] pub audit_notification_target_configurations: Option<::std::collections::HashMap>, - ///

The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as necessary when performing an audit.

+ ///

The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as required when performing an audit.

#[serde(rename = "roleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAccountAuditConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6171,7 +6672,7 @@ pub struct UpdateAuthorizerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAuthorizerResponse { ///

The authorizer ARN.

#[serde(rename = "authorizerArn")] @@ -6198,7 +6699,7 @@ pub struct UpdateBillingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBillingGroupResponse { ///

The latest version of the billing group.

     #[serde(rename = "version")]
@@ -6206,6 +6707,14 @@ pub struct UpdateBillingGroupResponse {
     pub version: Option<i64>,
 }
 
+/// <p>Parameters to define a mitigation action that changes the state of the CA certificate to inactive.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct UpdateCACertificateParams {
+    /// <p>The action that you want to apply to the CA certificate. The only supported value is DEACTIVATE.</p>
+    #[serde(rename = "action")]
+    pub action: String,
+}
+
 /// <p>The input to the UpdateCACertificate operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateCACertificateRequest {
@@ -6224,7 +6733,7 @@ pub struct UpdateCACertificateRequest {
     #[serde(rename = "registrationConfig")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub registration_config: Option<RegistrationConfig>,
-    /// <p>If true, remove auto registration.</p>
+    /// <p>If true, removes auto registration.</p>
     #[serde(rename = "removeAutoRegistration")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub remove_auto_registration: Option<bool>,
@@ -6241,6 +6750,14 @@ pub struct UpdateCertificateRequest {
     pub new_status: String,
 }
 
+/// <p>Parameters to define a mitigation action that changes the state of the device certificate to inactive.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct UpdateDeviceCertificateParams {
+    /// <p>The action that you want to apply to the device certificate. The only supported value is DEACTIVATE.</p>
+    #[serde(rename = "action")]
+    pub action: String,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateDynamicThingGroupRequest {
     /// <p>The expected version of the dynamic thing group to update.</p>

@@ -6268,7 +6785,7 @@ pub struct UpdateDynamicThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDynamicThingGroupResponse { ///

The dynamic thing group version.

#[serde(rename = "version")] @@ -6285,7 +6802,7 @@ pub struct UpdateEventConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEventConfigurationsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6301,7 +6818,7 @@ pub struct UpdateIndexingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIndexingConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6331,6 +6848,34 @@ pub struct UpdateJobRequest { pub timeout_config: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateMitigationActionRequest { + ///
<p>The friendly name for the mitigation action. You can't change the name by using UpdateMitigationAction. Instead, you must delete and re-create the mitigation action with the new name.</p>
+ #[serde(rename = "actionName")] + pub action_name: String, + ///
<p>Defines the type of action and the parameters for that action.</p>
+ #[serde(rename = "actionParams")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_params: Option, + ///
<p>The ARN of the IAM role that is used to apply the mitigation action.</p>
+ #[serde(rename = "roleArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role_arn: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateMitigationActionResponse { + ///
<p>The ARN for the new mitigation action.</p>
+ #[serde(rename = "actionArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_arn: Option, + ///
<p>A unique identifier for the mitigation action.</p>
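// Sketch of a round trip through the request/response pair defined here.
// Illustrative only: the action name and role ARN are placeholders, and
// `.sync()` is rusoto_core's blocking helper on RusotoFuture.
fn _example_update_mitigation_action(
    client: &dyn Iot,
) -> Result<(), RusotoError<UpdateMitigationActionError>> {
    let request = UpdateMitigationActionRequest {
        action_name: "deactivate-stale-ca".to_string(),
        role_arn: Some("arn:aws:iam::123456789012:role/IoTMitigation".to_string()),
        ..Default::default()
    };
    let response = client.update_mitigation_action(request).sync()?;
    println!("arn={:?} id={:?}", response.action_arn, response.action_id);
    Ok(())
}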
+ #[serde(rename = "actionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub action_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateRoleAliasRequest { ///
<p>The number of seconds the credential will be valid.</p>
@@ -6347,7 +6892,7 @@ pub struct UpdateRoleAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRoleAliasResponse { ///
<p>The role alias.</p>
#[serde(rename = "roleAlias")] @@ -6365,25 +6910,25 @@ pub struct UpdateScheduledAuditRequest { #[serde(rename = "dayOfMonth")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_month: Option, - ///
<p>The day of the week on which the scheduled audit takes place. Can be one of "SUN", "MON", "TUE", "WED", "THU", "FRI" or "SAT". This field is required if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY".</p>
+ ///
<p>The day of the week on which the scheduled audit takes place. Can be one of "SUN", "MON", "TUE", "WED", "THU", "FRI", or "SAT". This field is required if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY".</p>
#[serde(rename = "dayOfWeek")] #[serde(skip_serializing_if = "Option::is_none")] pub day_of_week: Option, - ///
<p>How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", "BIWEEKLY" or "MONTHLY". The actual start time of each audit is determined by the system.</p>
+ ///
<p>How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", "BIWEEKLY", or "MONTHLY". The start time of each audit is determined by the system.</p>
#[serde(rename = "frequency")] #[serde(skip_serializing_if = "Option::is_none")] pub frequency: Option, ///
<p>The name of the scheduled audit. (Max. 128 chars)</p>
#[serde(rename = "scheduledAuditName")] pub scheduled_audit_name: String, - ///
<p>Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)</p>
+ ///
<p>Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled, or use UpdateAccountAuditConfiguration to select which checks are enabled.)</p>
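// Sketch of a complete request for this struct. The audit name and check
// identifier are hypothetical; per the docs above, any targeted check must
// already be enabled on the account.
fn _example_weekly_audit() -> UpdateScheduledAuditRequest {
    UpdateScheduledAuditRequest {
        scheduled_audit_name: "weekly-cert-audit".to_string(),
        frequency: Some("WEEKLY".to_string()),
        day_of_week: Some("MON".to_string()),
        target_check_names: Some(vec!["CA_CERTIFICATE_EXPIRING_CHECK".to_string()]),
        ..Default::default()
    }
}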
#[serde(rename = "targetCheckNames")] #[serde(skip_serializing_if = "Option::is_none")] pub target_check_names: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateScheduledAuditResponse { ///
<p>The ARN of the scheduled audit.</p>
#[serde(rename = "scheduledAuditArn")] @@ -6393,7 +6938,7 @@ pub struct UpdateScheduledAuditResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateSecurityProfileRequest { - ///
<p>A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.</p>
+ ///
<p>A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.</p>
#[serde(rename = "additionalMetricsToRetain")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_metrics_to_retain: Option>, @@ -6405,19 +6950,19 @@ pub struct UpdateSecurityProfileRequest { #[serde(rename = "behaviors")] #[serde(skip_serializing_if = "Option::is_none")] pub behaviors: Option>, - ///
<p>If true, delete all additionalMetricsToRetain defined for this security profile. If any additionalMetricsToRetain are defined in the current invocation an exception occurs.</p>
+ ///
<p>If true, delete all additionalMetricsToRetain defined for this security profile. If any additionalMetricsToRetain are defined in the current invocation, an exception occurs.</p>
#[serde(rename = "deleteAdditionalMetricsToRetain")] #[serde(skip_serializing_if = "Option::is_none")] pub delete_additional_metrics_to_retain: Option, - ///
<p>If true, delete all alertTargets defined for this security profile. If any alertTargets are defined in the current invocation an exception occurs.</p>
+ ///
<p>If true, delete all alertTargets defined for this security profile. If any alertTargets are defined in the current invocation, an exception occurs.</p>
#[serde(rename = "deleteAlertTargets")] #[serde(skip_serializing_if = "Option::is_none")] pub delete_alert_targets: Option, - ///
<p>If true, delete all behaviors defined for this security profile. If any behaviors are defined in the current invocation an exception occurs.</p>
+ ///
<p>If true, delete all behaviors defined for this security profile. If any behaviors are defined in the current invocation, an exception occurs.</p>
#[serde(rename = "deleteBehaviors")] #[serde(skip_serializing_if = "Option::is_none")] pub delete_behaviors: Option, - ///
<p>The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.</p>
+ ///
<p>The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different from the actual version, a VersionConflictException is thrown.</p>
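// Sketch of the optimistic-concurrency pattern this field enables: send the
// last version you read, and a concurrent writer surfaces as a
// VersionConflictException instead of a silent overwrite. The
// `security_profile_name` field is elided from this hunk and assumed here.
fn _example_guarded_update(last_seen_version: i64) -> UpdateSecurityProfileRequest {
    UpdateSecurityProfileRequest {
        security_profile_name: "fleet-baseline".to_string(),
        expected_version: Some(last_seen_version),
        ..Default::default()
    }
}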
#[serde(rename = "expectedVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub expected_version: Option, @@ -6431,9 +6976,9 @@ pub struct UpdateSecurityProfileRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSecurityProfileResponse { - ///
<p>A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the security profile's behaviors but it is also retained for any metric specified here.</p>
+ ///
<p>A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the security profile's behaviors, but it is also retained for any metric specified here.</p>
#[serde(rename = "additionalMetricsToRetain")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_metrics_to_retain: Option>, @@ -6491,7 +7036,7 @@ pub struct UpdateStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateStreamResponse { ///
<p>A description of the stream.</p>
#[serde(rename = "description")] @@ -6526,7 +7071,7 @@ pub struct UpdateThingGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateThingGroupResponse { ///
<p>The version of the updated thing group.</p>
#[serde(rename = "version")] @@ -6555,7 +7100,7 @@ pub struct UpdateThingGroupsForThingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateThingGroupsForThingResponse {} ///
<p>The input for the UpdateThing operation.</p>
@@ -6584,7 +7129,7 @@ pub struct UpdateThingRequest { ///
<p>The output from the UpdateThing operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateThingResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6595,7 +7140,7 @@ pub struct ValidateSecurityProfileBehaviorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValidateSecurityProfileBehaviorsResponse { ///
<p>True if the behaviors were valid.</p>
#[serde(rename = "valid")] @@ -6609,7 +7154,7 @@ pub struct ValidateSecurityProfileBehaviorsResponse { ///
<p>Information about an error found in a behavior specification.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValidationError { ///
<p>The description of an error found in the behaviors.</p>
#[serde(rename = "errorMessage")] @@ -6619,7 +7164,7 @@ pub struct ValidationError { ///
<p>Information about a Device Defender security profile behavior violation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ViolationEvent { ///
<p>The behavior which was violated.</p>
#[serde(rename = "behavior")] @@ -7197,6 +7742,67 @@ impl Error for AttachThingPrincipalError { } } } +/// Errors returned by CancelAuditMitigationActionsTask +#[derive(Debug, PartialEq)] +pub enum CancelAuditMitigationActionsTaskError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The specified resource does not exist.</p>
+ ResourceNotFound(String), + ///
<p>The rate exceeds the limit.</p>
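// Sketch of consuming this enum at a call site: service-level variants arrive
// wrapped in RusotoError::Service, while transport, parsing, and credential
// failures arrive as the other RusotoError variants.
fn _example_classify(err: &RusotoError<CancelAuditMitigationActionsTaskError>) {
    match err {
        RusotoError::Service(CancelAuditMitigationActionsTaskError::ResourceNotFound(msg)) => {
            eprintln!("no such task: {}", msg);
        }
        RusotoError::Service(CancelAuditMitigationActionsTaskError::Throttling(msg)) => {
            eprintln!("throttled, retry with backoff: {}", msg);
        }
        other => eprintln!("request failed: {:?}", other),
    }
}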
+ Throttling(String), +} + +impl CancelAuditMitigationActionsTaskError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service( + CancelAuditMitigationActionsTaskError::InternalFailure(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + CancelAuditMitigationActionsTaskError::InvalidRequest(err.msg), + ) + } + "ResourceNotFoundException" => { + return RusotoError::Service( + CancelAuditMitigationActionsTaskError::ResourceNotFound(err.msg), + ) + } + "ThrottlingException" => { + return RusotoError::Service(CancelAuditMitigationActionsTaskError::Throttling( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CancelAuditMitigationActionsTaskError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CancelAuditMitigationActionsTaskError { + fn description(&self) -> &str { + match *self { + CancelAuditMitigationActionsTaskError::InternalFailure(ref cause) => cause, + CancelAuditMitigationActionsTaskError::InvalidRequest(ref cause) => cause, + CancelAuditMitigationActionsTaskError::ResourceNotFound(ref cause) => cause, + CancelAuditMitigationActionsTaskError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by CancelAuditTask #[derive(Debug, PartialEq)] pub enum CancelAuditTaskError { @@ -7918,6 +8524,71 @@ impl Error for CreateKeysAndCertificateError { } } } +/// Errors returned by CreateMitigationAction +#[derive(Debug, PartialEq)] +pub enum CreateMitigationActionError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>A limit has been exceeded.</p>
+ LimitExceeded(String), + ///
<p>The resource already exists.</p>
+ ResourceAlreadyExists(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl CreateMitigationActionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(CreateMitigationActionError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(CreateMitigationActionError::InvalidRequest( + err.msg, + )) + } + "LimitExceededException" => { + return RusotoError::Service(CreateMitigationActionError::LimitExceeded( + err.msg, + )) + } + "ResourceAlreadyExistsException" => { + return RusotoError::Service( + CreateMitigationActionError::ResourceAlreadyExists(err.msg), + ) + } + "ThrottlingException" => { + return RusotoError::Service(CreateMitigationActionError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CreateMitigationActionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CreateMitigationActionError { + fn description(&self) -> &str { + match *self { + CreateMitigationActionError::InternalFailure(ref cause) => cause, + CreateMitigationActionError::InvalidRequest(ref cause) => cause, + CreateMitigationActionError::LimitExceeded(ref cause) => cause, + CreateMitigationActionError::ResourceAlreadyExists(ref cause) => cause, + CreateMitigationActionError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by CreateOTAUpdate #[derive(Debug, PartialEq)] pub enum CreateOTAUpdateError { @@ -8225,6 +8896,8 @@ pub enum CreateScheduledAuditError { InvalidRequest(String), ///
<p>A limit has been exceeded.</p>
LimitExceeded(String), + ///
<p>The resource already exists.</p>
+ ResourceAlreadyExists(String), ///
<p>The rate exceeds the limit.</p>
Throttling(String), } @@ -8244,6 +8917,11 @@ impl CreateScheduledAuditError { "LimitExceededException" => { return RusotoError::Service(CreateScheduledAuditError::LimitExceeded(err.msg)) } + "ResourceAlreadyExistsException" => { + return RusotoError::Service(CreateScheduledAuditError::ResourceAlreadyExists( + err.msg, + )) + } "ThrottlingException" => { return RusotoError::Service(CreateScheduledAuditError::Throttling(err.msg)) } @@ -8265,6 +8943,7 @@ impl Error for CreateScheduledAuditError { CreateScheduledAuditError::InternalFailure(ref cause) => cause, CreateScheduledAuditError::InvalidRequest(ref cause) => cause, CreateScheduledAuditError::LimitExceeded(ref cause) => cause, + CreateScheduledAuditError::ResourceAlreadyExists(ref cause) => cause, CreateScheduledAuditError::Throttling(ref cause) => cause, } } @@ -9169,6 +9848,55 @@ impl Error for DeleteJobExecutionError { } } } +/// Errors returned by DeleteMitigationAction +#[derive(Debug, PartialEq)] +pub enum DeleteMitigationActionError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl DeleteMitigationActionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(DeleteMitigationActionError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(DeleteMitigationActionError::InvalidRequest( + err.msg, + )) + } + "ThrottlingException" => { + return RusotoError::Service(DeleteMitigationActionError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DeleteMitigationActionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DeleteMitigationActionError { + fn description(&self) -> &str { + match *self { + DeleteMitigationActionError::InternalFailure(ref cause) => cause, + DeleteMitigationActionError::InvalidRequest(ref cause) => cause, + DeleteMitigationActionError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by DeleteOTAUpdate #[derive(Debug, PartialEq)] pub enum DeleteOTAUpdateError { @@ -10092,9 +10820,9 @@ impl Error for DescribeAccountAuditConfigurationError { } } } -/// Errors returned by DescribeAuditTask +/// Errors returned by DescribeAuditFinding #[derive(Debug, PartialEq)] -pub enum DescribeAuditTaskError { +pub enum DescribeAuditFindingError { ///
<p>An unexpected error has occurred.</p>
InternalFailure(String), ///
<p>The request is not valid.</p>
@@ -10105,21 +10833,25 @@ pub enum DescribeAuditTaskError { Throttling(String), } -impl DescribeAuditTaskError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeAuditFindingError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "InternalFailureException" => { - return RusotoError::Service(DescribeAuditTaskError::InternalFailure(err.msg)) + return RusotoError::Service(DescribeAuditFindingError::InternalFailure( + err.msg, + )) } "InvalidRequestException" => { - return RusotoError::Service(DescribeAuditTaskError::InvalidRequest(err.msg)) + return RusotoError::Service(DescribeAuditFindingError::InvalidRequest(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(DescribeAuditTaskError::ResourceNotFound(err.msg)) + return RusotoError::Service(DescribeAuditFindingError::ResourceNotFound( + err.msg, + )) } "ThrottlingException" => { - return RusotoError::Service(DescribeAuditTaskError::Throttling(err.msg)) + return RusotoError::Service(DescribeAuditFindingError::Throttling(err.msg)) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -10128,61 +10860,59 @@ impl DescribeAuditTaskError { return RusotoError::Unknown(res); } } -impl fmt::Display for DescribeAuditTaskError { +impl fmt::Display for DescribeAuditFindingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeAuditTaskError { +impl Error for DescribeAuditFindingError { fn description(&self) -> &str { match *self { - DescribeAuditTaskError::InternalFailure(ref cause) => cause, - DescribeAuditTaskError::InvalidRequest(ref cause) => cause, - DescribeAuditTaskError::ResourceNotFound(ref cause) => cause, - DescribeAuditTaskError::Throttling(ref cause) => cause, + DescribeAuditFindingError::InternalFailure(ref cause) => cause, + DescribeAuditFindingError::InvalidRequest(ref cause) => cause, + DescribeAuditFindingError::ResourceNotFound(ref cause) => cause, + DescribeAuditFindingError::Throttling(ref cause) => cause, } } } -/// Errors returned by DescribeAuthorizer +/// Errors returned by DescribeAuditMitigationActionsTask #[derive(Debug, PartialEq)] -pub enum DescribeAuthorizerError { +pub enum DescribeAuditMitigationActionsTaskError { ///
<p>An unexpected error has occurred.</p>
InternalFailure(String), ///
<p>The request is not valid.</p>
InvalidRequest(String), ///
<p>The specified resource does not exist.</p>
ResourceNotFound(String), - ///
<p>The service is temporarily unavailable.</p>
- ServiceUnavailable(String), ///
<p>The rate exceeds the limit.</p>
Throttling(String), - ///
<p>You are not authorized to perform this operation.</p>
- Unauthorized(String), } -impl DescribeAuthorizerError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeAuditMitigationActionsTaskError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "InternalFailureException" => { - return RusotoError::Service(DescribeAuthorizerError::InternalFailure(err.msg)) + return RusotoError::Service( + DescribeAuditMitigationActionsTaskError::InternalFailure(err.msg), + ) } "InvalidRequestException" => { - return RusotoError::Service(DescribeAuthorizerError::InvalidRequest(err.msg)) + return RusotoError::Service( + DescribeAuditMitigationActionsTaskError::InvalidRequest(err.msg), + ) } "ResourceNotFoundException" => { - return RusotoError::Service(DescribeAuthorizerError::ResourceNotFound(err.msg)) - } - "ServiceUnavailableException" => { - return RusotoError::Service(DescribeAuthorizerError::ServiceUnavailable( - err.msg, - )) + return RusotoError::Service( + DescribeAuditMitigationActionsTaskError::ResourceNotFound(err.msg), + ) } "ThrottlingException" => { - return RusotoError::Service(DescribeAuthorizerError::Throttling(err.msg)) - } - "UnauthorizedException" => { - return RusotoError::Service(DescribeAuthorizerError::Unauthorized(err.msg)) + return RusotoError::Service( + DescribeAuditMitigationActionsTaskError::Throttling(err.msg), + ) } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} @@ -10191,26 +10921,24 @@ impl DescribeAuthorizerError { return RusotoError::Unknown(res); } } -impl fmt::Display for DescribeAuthorizerError { +impl fmt::Display for DescribeAuditMitigationActionsTaskError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } -impl Error for DescribeAuthorizerError { +impl Error for DescribeAuditMitigationActionsTaskError { fn description(&self) -> &str { match *self { - DescribeAuthorizerError::InternalFailure(ref cause) => cause, - DescribeAuthorizerError::InvalidRequest(ref cause) => cause, - DescribeAuthorizerError::ResourceNotFound(ref cause) => cause, - DescribeAuthorizerError::ServiceUnavailable(ref cause) => cause, - DescribeAuthorizerError::Throttling(ref cause) => cause, - DescribeAuthorizerError::Unauthorized(ref cause) => cause, + DescribeAuditMitigationActionsTaskError::InternalFailure(ref cause) => cause, + DescribeAuditMitigationActionsTaskError::InvalidRequest(ref cause) => cause, + DescribeAuditMitigationActionsTaskError::ResourceNotFound(ref cause) => cause, + DescribeAuditMitigationActionsTaskError::Throttling(ref cause) => cause, } } } -/// Errors returned by DescribeBillingGroup +/// Errors returned by DescribeAuditTask #[derive(Debug, PartialEq)] -pub enum DescribeBillingGroupError { +pub enum DescribeAuditTaskError { ///
<p>An unexpected error has occurred.</p>
InternalFailure(String), ///
<p>The request is not valid.</p>
@@ -10221,20 +10949,136 @@ pub enum DescribeBillingGroupError { Throttling(String), } -impl DescribeBillingGroupError { - pub fn from_response(res: BufferedHttpResponse) -> RusotoError { +impl DescribeAuditTaskError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { "InternalFailureException" => { - return RusotoError::Service(DescribeBillingGroupError::InternalFailure( - err.msg, - )) + return RusotoError::Service(DescribeAuditTaskError::InternalFailure(err.msg)) } "InvalidRequestException" => { - return RusotoError::Service(DescribeBillingGroupError::InvalidRequest(err.msg)) + return RusotoError::Service(DescribeAuditTaskError::InvalidRequest(err.msg)) } "ResourceNotFoundException" => { - return RusotoError::Service(DescribeBillingGroupError::ResourceNotFound( + return RusotoError::Service(DescribeAuditTaskError::ResourceNotFound(err.msg)) + } + "ThrottlingException" => { + return RusotoError::Service(DescribeAuditTaskError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeAuditTaskError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeAuditTaskError { + fn description(&self) -> &str { + match *self { + DescribeAuditTaskError::InternalFailure(ref cause) => cause, + DescribeAuditTaskError::InvalidRequest(ref cause) => cause, + DescribeAuditTaskError::ResourceNotFound(ref cause) => cause, + DescribeAuditTaskError::Throttling(ref cause) => cause, + } + } +} +/// Errors returned by DescribeAuthorizer +#[derive(Debug, PartialEq)] +pub enum DescribeAuthorizerError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The specified resource does not exist.</p>
+ ResourceNotFound(String), + ///
<p>The service is temporarily unavailable.</p>
+ ServiceUnavailable(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), + ///
<p>You are not authorized to perform this operation.</p>
+ Unauthorized(String), +} + +impl DescribeAuthorizerError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(DescribeAuthorizerError::InternalFailure(err.msg)) + } + "InvalidRequestException" => { + return RusotoError::Service(DescribeAuthorizerError::InvalidRequest(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DescribeAuthorizerError::ResourceNotFound(err.msg)) + } + "ServiceUnavailableException" => { + return RusotoError::Service(DescribeAuthorizerError::ServiceUnavailable( + err.msg, + )) + } + "ThrottlingException" => { + return RusotoError::Service(DescribeAuthorizerError::Throttling(err.msg)) + } + "UnauthorizedException" => { + return RusotoError::Service(DescribeAuthorizerError::Unauthorized(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeAuthorizerError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeAuthorizerError { + fn description(&self) -> &str { + match *self { + DescribeAuthorizerError::InternalFailure(ref cause) => cause, + DescribeAuthorizerError::InvalidRequest(ref cause) => cause, + DescribeAuthorizerError::ResourceNotFound(ref cause) => cause, + DescribeAuthorizerError::ServiceUnavailable(ref cause) => cause, + DescribeAuthorizerError::Throttling(ref cause) => cause, + DescribeAuthorizerError::Unauthorized(ref cause) => cause, + } + } +} +/// Errors returned by DescribeBillingGroup +#[derive(Debug, PartialEq)] +pub enum DescribeBillingGroupError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The specified resource does not exist.</p>
+ ResourceNotFound(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl DescribeBillingGroupError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(DescribeBillingGroupError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(DescribeBillingGroupError::InvalidRequest(err.msg)) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DescribeBillingGroupError::ResourceNotFound( err.msg, )) } @@ -10741,6 +11585,63 @@ impl Error for DescribeJobExecutionError { } } } +/// Errors returned by DescribeMitigationAction +#[derive(Debug, PartialEq)] +pub enum DescribeMitigationActionError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The specified resource does not exist.</p>
+ ResourceNotFound(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl DescribeMitigationActionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(DescribeMitigationActionError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(DescribeMitigationActionError::InvalidRequest( + err.msg, + )) + } + "ResourceNotFoundException" => { + return RusotoError::Service(DescribeMitigationActionError::ResourceNotFound( + err.msg, + )) + } + "ThrottlingException" => { + return RusotoError::Service(DescribeMitigationActionError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeMitigationActionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeMitigationActionError { + fn description(&self) -> &str { + match *self { + DescribeMitigationActionError::InternalFailure(ref cause) => cause, + DescribeMitigationActionError::InvalidRequest(ref cause) => cause, + DescribeMitigationActionError::ResourceNotFound(ref cause) => cause, + DescribeMitigationActionError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by DescribeRoleAlias #[derive(Debug, PartialEq)] pub enum DescribeRoleAliasError { @@ -12449,6 +13350,112 @@ impl Error for ListAuditFindingsError { } } } +/// Errors returned by ListAuditMitigationActionsExecutions +#[derive(Debug, PartialEq)] +pub enum ListAuditMitigationActionsExecutionsError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl ListAuditMitigationActionsExecutionsError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service( + ListAuditMitigationActionsExecutionsError::InternalFailure(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + ListAuditMitigationActionsExecutionsError::InvalidRequest(err.msg), + ) + } + "ThrottlingException" => { + return RusotoError::Service( + ListAuditMitigationActionsExecutionsError::Throttling(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListAuditMitigationActionsExecutionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListAuditMitigationActionsExecutionsError { + fn description(&self) -> &str { + match *self { + ListAuditMitigationActionsExecutionsError::InternalFailure(ref cause) => cause, + ListAuditMitigationActionsExecutionsError::InvalidRequest(ref cause) => cause, + ListAuditMitigationActionsExecutionsError::Throttling(ref cause) => cause, + } + } +} +/// Errors returned by ListAuditMitigationActionsTasks +#[derive(Debug, PartialEq)] +pub enum ListAuditMitigationActionsTasksError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl ListAuditMitigationActionsTasksError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service( + ListAuditMitigationActionsTasksError::InternalFailure(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + ListAuditMitigationActionsTasksError::InvalidRequest(err.msg), + ) + } + "ThrottlingException" => { + return RusotoError::Service(ListAuditMitigationActionsTasksError::Throttling( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListAuditMitigationActionsTasksError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListAuditMitigationActionsTasksError { + fn description(&self) -> &str { + match *self { + ListAuditMitigationActionsTasksError::InternalFailure(ref cause) => cause, + ListAuditMitigationActionsTasksError::InvalidRequest(ref cause) => cause, + ListAuditMitigationActionsTasksError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by ListAuditTasks #[derive(Debug, PartialEq)] pub enum ListAuditTasksError { @@ -13003,6 +14010,55 @@ impl Error for ListJobsError { } } } +/// Errors returned by ListMitigationActions +#[derive(Debug, PartialEq)] +pub enum ListMitigationActionsError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl ListMitigationActionsError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(ListMitigationActionsError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(ListMitigationActionsError::InvalidRequest( + err.msg, + )) + } + "ThrottlingException" => { + return RusotoError::Service(ListMitigationActionsError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListMitigationActionsError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListMitigationActionsError { + fn description(&self) -> &str { + match *self { + ListMitigationActionsError::InternalFailure(ref cause) => cause, + ListMitigationActionsError::InvalidRequest(ref cause) => cause, + ListMitigationActionsError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by ListOTAUpdates #[derive(Debug, PartialEq)] pub enum ListOTAUpdatesError { @@ -15457,6 +16513,75 @@ impl Error for SetV2LoggingOptionsError { } } } +/// Errors returned by StartAuditMitigationActionsTask +#[derive(Debug, PartialEq)] +pub enum StartAuditMitigationActionsTaskError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>A limit has been exceeded.</p>
+ LimitExceeded(String), + ///
<p>This exception occurs if you attempt to start a task with the same task-id as an existing task but with a different clientRequestToken.</p>
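// Sketch: because the exception above is keyed on reusing a task id with a
// different clientRequestToken, a caller retrying with the same token can
// treat this variant as "already started" rather than a failure.
fn _example_already_started(err: &RusotoError<StartAuditMitigationActionsTaskError>) -> bool {
    match err {
        RusotoError::Service(StartAuditMitigationActionsTaskError::TaskAlreadyExists(_)) => true,
        _ => false,
    }
}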
+ TaskAlreadyExists(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl StartAuditMitigationActionsTaskError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service( + StartAuditMitigationActionsTaskError::InternalFailure(err.msg), + ) + } + "InvalidRequestException" => { + return RusotoError::Service( + StartAuditMitigationActionsTaskError::InvalidRequest(err.msg), + ) + } + "LimitExceededException" => { + return RusotoError::Service( + StartAuditMitigationActionsTaskError::LimitExceeded(err.msg), + ) + } + "TaskAlreadyExistsException" => { + return RusotoError::Service( + StartAuditMitigationActionsTaskError::TaskAlreadyExists(err.msg), + ) + } + "ThrottlingException" => { + return RusotoError::Service(StartAuditMitigationActionsTaskError::Throttling( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for StartAuditMitigationActionsTaskError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for StartAuditMitigationActionsTaskError { + fn description(&self) -> &str { + match *self { + StartAuditMitigationActionsTaskError::InternalFailure(ref cause) => cause, + StartAuditMitigationActionsTaskError::InvalidRequest(ref cause) => cause, + StartAuditMitigationActionsTaskError::LimitExceeded(ref cause) => cause, + StartAuditMitigationActionsTaskError::TaskAlreadyExists(ref cause) => cause, + StartAuditMitigationActionsTaskError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by StartOnDemandAuditTask #[derive(Debug, PartialEq)] pub enum StartOnDemandAuditTaskError { @@ -16542,6 +17667,63 @@ impl Error for UpdateJobError { } } } +/// Errors returned by UpdateMitigationAction +#[derive(Debug, PartialEq)] +pub enum UpdateMitigationActionError { + ///
<p>An unexpected error has occurred.</p>
+ InternalFailure(String), + ///
<p>The request is not valid.</p>
+ InvalidRequest(String), + ///
<p>The specified resource does not exist.</p>
+ ResourceNotFound(String), + ///
<p>The rate exceeds the limit.</p>
+ Throttling(String), +} + +impl UpdateMitigationActionError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse_rest(&res) { + match err.typ.as_str() { + "InternalFailureException" => { + return RusotoError::Service(UpdateMitigationActionError::InternalFailure( + err.msg, + )) + } + "InvalidRequestException" => { + return RusotoError::Service(UpdateMitigationActionError::InvalidRequest( + err.msg, + )) + } + "ResourceNotFoundException" => { + return RusotoError::Service(UpdateMitigationActionError::ResourceNotFound( + err.msg, + )) + } + "ThrottlingException" => { + return RusotoError::Service(UpdateMitigationActionError::Throttling(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UpdateMitigationActionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UpdateMitigationActionError { + fn description(&self) -> &str { + match *self { + UpdateMitigationActionError::InternalFailure(ref cause) => cause, + UpdateMitigationActionError::InvalidRequest(ref cause) => cause, + UpdateMitigationActionError::ResourceNotFound(ref cause) => cause, + UpdateMitigationActionError::Throttling(ref cause) => cause, + } + } +} /// Errors returned by UpdateRoleAlias #[derive(Debug, PartialEq)] pub enum UpdateRoleAliasError { @@ -17061,7 +18243,7 @@ pub trait Iot { input: AttachPrincipalPolicyRequest, ) -> RusotoFuture<(), AttachPrincipalPolicyError>; - ///
<p>Associates a Device Defender security profile with a thing group or with this account. Each thing group or account can have up to five security profiles associated with it.</p>
+ ///
<p>Associates a Device Defender security profile with a thing group or this account. Each thing group or account can have up to five security profiles associated with it.</p>
fn attach_security_profile( &self, input: AttachSecurityProfileRequest, @@ -17073,6 +18255,12 @@ pub trait Iot { input: AttachThingPrincipalRequest, ) -> RusotoFuture; + ///
<p>Cancels a mitigation action task that is in progress. If the task is not in progress, an InvalidRequestException occurs.</p>
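// Sketch of invoking the method declared below (hypothetical task id;
// `.sync()` is rusoto_core's blocking helper on RusotoFuture):
fn _example_cancel_task(
    client: &dyn Iot,
) -> Result<(), RusotoError<CancelAuditMitigationActionsTaskError>> {
    let request = CancelAuditMitigationActionsTaskRequest {
        task_id: "audit-fix-2019-06-01".to_string(),
        ..Default::default()
    };
    client.cancel_audit_mitigation_actions_task(request).sync()?;
    Ok(())
}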
+ fn cancel_audit_mitigation_actions_task( + &self, + input: CancelAuditMitigationActionsTaskRequest, + ) -> RusotoFuture; + ///
<p>Cancels an audit that is in progress. The audit can be either scheduled or on-demand. If the audit is not in progress, an "InvalidRequestException" occurs.</p>
fn cancel_audit_task( &self, @@ -17138,6 +18326,12 @@ pub trait Iot { input: CreateKeysAndCertificateRequest, ) -> RusotoFuture; + ///
<p>Defines an action that can be applied to audit findings by using StartAuditMitigationActionsTask. Each mitigation action can apply only one type of change.</p>
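// Sketch of defining one action (one type of change, per the doc above). The
// `MitigationActionParams` container and its `update_ca_certificate_params`
// field are assumed from the AWS API shape and are not shown in this hunk;
// the action name and role ARN are placeholders.
fn _example_define_action(
    client: &dyn Iot,
) -> Result<(), RusotoError<CreateMitigationActionError>> {
    let request = CreateMitigationActionRequest {
        action_name: "deactivate-ca".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/IoTMitigation".to_string(),
        action_params: MitigationActionParams {
            update_ca_certificate_params: Some(UpdateCACertificateParams {
                action: "DEACTIVATE".to_string(),
            }),
            ..Default::default()
        },
        ..Default::default()
    };
    let response = client.create_mitigation_action(request).sync()?;
    println!("created: {:?}", response.action_arn);
    Ok(())
}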
+ fn create_mitigation_action( + &self, + input: CreateMitigationActionRequest, + ) -> RusotoFuture; + ///
<p>Creates an AWS IoT OTAUpdate on a target group of things or groups.</p>
fn create_ota_update( &self, @@ -17228,7 +18422,7 @@ pub trait Iot { input: DeleteCACertificateRequest, ) -> RusotoFuture; - ///
<p>Deletes the specified certificate.</p> <p>A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.</p>
+ ///
<p>Deletes the specified certificate.</p> <p>A certificate cannot be deleted if it has a policy or IoT thing attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.</p>
fn delete_certificate( &self, input: DeleteCertificateRequest, @@ -17249,6 +18443,12 @@ pub trait Iot { input: DeleteJobExecutionRequest, ) -> RusotoFuture<(), DeleteJobExecutionError>; + ///
<p>Deletes a defined mitigation action from your AWS account.</p>
+ fn delete_mitigation_action( + &self, + input: DeleteMitigationActionRequest, + ) -> RusotoFuture; + ///
<p>Delete an OTA update.</p>
fn delete_ota_update( &self, @@ -17337,6 +18537,21 @@ pub trait Iot { DescribeAccountAuditConfigurationError, >; + ///
<p>Gets information about a single audit finding. Properties include the reason for noncompliance, the severity of the issue, and when the audit that returned the finding was started.</p>
+ fn describe_audit_finding( + &self, + input: DescribeAuditFindingRequest, + ) -> RusotoFuture; + + ///
<p>Gets information about an audit mitigation task that is used to apply mitigation actions to a set of audit findings. Properties include the actions being applied, the audit checks to which they're being applied, the task status, and aggregated task statistics.</p>
+ fn describe_audit_mitigation_actions_task( + &self, + input: DescribeAuditMitigationActionsTaskRequest, + ) -> RusotoFuture< + DescribeAuditMitigationActionsTaskResponse, + DescribeAuditMitigationActionsTaskError, + >; + ///
<p>Gets information about a Device Defender audit.</p>
fn describe_audit_task( &self, @@ -17401,6 +18616,12 @@ pub trait Iot { input: DescribeJobExecutionRequest, ) -> RusotoFuture; + ///
<p>Gets information about a mitigation action.</p>
+ fn describe_mitigation_action( + &self, + input: DescribeMitigationActionRequest, + ) -> RusotoFuture; + ///
<p>Describes a role alias.</p>
fn describe_role_alias( &self, @@ -17562,6 +18783,21 @@ pub trait Iot { input: ListAuditFindingsRequest, ) -> RusotoFuture; + ///
<p>Gets the status of audit mitigation action tasks that were executed.</p>
+ fn list_audit_mitigation_actions_executions( + &self, + input: ListAuditMitigationActionsExecutionsRequest, + ) -> RusotoFuture< + ListAuditMitigationActionsExecutionsResponse, + ListAuditMitigationActionsExecutionsError, + >; + + ///
<p>Gets a list of audit mitigation action tasks that match the specified filters.</p>
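// Sketch of draining the paginated listing declared below. The response's
// `tasks`/`next_token` fields and the `AuditMitigationActionsTaskMetadata`
// element type are assumed from the AWS API shape; `start_time`/`end_time`
// are taken to be epoch-second timestamps.
fn _example_list_all_tasks(
    client: &dyn Iot,
    start_time: f64,
    end_time: f64,
) -> Result<Vec<AuditMitigationActionsTaskMetadata>, RusotoError<ListAuditMitigationActionsTasksError>>
{
    let mut tasks = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let page = client
            .list_audit_mitigation_actions_tasks(ListAuditMitigationActionsTasksRequest {
                start_time,
                end_time,
                next_token: next_token.take(),
                ..Default::default()
            })
            .sync()?;
        tasks.extend(page.tasks.unwrap_or_default());
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(tasks)
}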
+ fn list_audit_mitigation_actions_tasks( + &self, + input: ListAuditMitigationActionsTasksRequest, + ) -> RusotoFuture; + ///
<p>Lists the Device Defender audits that have been performed during a given time period.</p>
fn list_audit_tasks( &self, @@ -17619,6 +18855,12 @@ pub trait Iot { ///
<p>Lists jobs.</p>
fn list_jobs(&self, input: ListJobsRequest) -> RusotoFuture; + ///
<p>Gets a list of all mitigation actions that match the specified filter criteria.</p>
+ fn list_mitigation_actions( + &self, + input: ListMitigationActionsRequest, + ) -> RusotoFuture; + ///
<p>Lists OTA updates.</p>
fn list_ota_updates( &self, @@ -17775,7 +19017,7 @@ pub trait Iot { input: ListV2LoggingLevelsRequest, ) -> RusotoFuture; - ///
<p>Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior or thing (device).</p>
+ ///
<p>Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior, or thing (device).</p>
fn list_violation_events( &self, input: ListViolationEventsRequest, @@ -17859,6 +19101,12 @@ pub trait Iot { input: SetV2LoggingOptionsRequest, ) -> RusotoFuture<(), SetV2LoggingOptionsError>; + ///
<p>Starts a task that applies a set of mitigation actions to the specified target.</p>
+ fn start_audit_mitigation_actions_task( + &self, + input: StartAuditMitigationActionsTaskRequest, + ) -> RusotoFuture; + ///
<p>Starts an on-demand Device Defender audit.</p>
fn start_on_demand_audit_task( &self, @@ -17958,13 +19206,19 @@ pub trait Iot { ///
<p>Updates supported fields of the specified job.</p>
fn update_job(&self, input: UpdateJobRequest) -> RusotoFuture<(), UpdateJobError>; + ///
<p>Updates the definition for the specified mitigation action.</p>
+ fn update_mitigation_action( + &self, + input: UpdateMitigationActionRequest, + ) -> RusotoFuture; + ///
<p>Updates a role alias.</p>
fn update_role_alias( &self, input: UpdateRoleAliasRequest, ) -> RusotoFuture; - ///
<p>Updates a scheduled audit, including what checks are performed and how often the audit takes place.</p>
+ ///
<p>Updates a scheduled audit, including which checks are performed and how often the audit takes place.</p>
fn update_scheduled_audit( &self, input: UpdateScheduledAuditRequest, @@ -18018,10 +19272,7 @@ impl IotClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> IotClient { - IotClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -18035,10 +19286,14 @@ impl IotClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - IotClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> IotClient { + IotClient { client, region } } } @@ -18239,7 +19494,7 @@ impl Iot for IotClient { }) } - ///
<p>Associates a Device Defender security profile with a thing group or with this account. Each thing group or account can have up to five security profiles associated with it.</p>
+ ///
<p>Associates a Device Defender security profile with a thing group or this account. Each thing group or account can have up to five security profiles associated with it.</p>
fn attach_security_profile( &self, input: AttachSecurityProfileRequest, @@ -18314,6 +19569,40 @@ impl Iot for IotClient { }) } + ///
<p>Cancels a mitigation action task that is in progress. If the task is not in progress, an InvalidRequestException occurs.</p>
+ fn cancel_audit_mitigation_actions_task( + &self, + input: CancelAuditMitigationActionsTaskRequest, + ) -> RusotoFuture + { + let request_uri = format!( + "/audit/mitigationactions/tasks/{task_id}/cancel", + task_id = input.task_id + ); + + let mut request = SignedRequest::new("PUT", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(CancelAuditMitigationActionsTaskError::from_response( + response, + )) + })) + } + }) + } + ///
<p>Cancels an audit that is in progress. The audit can be either scheduled or on-demand. If the audit is not in progress, an "InvalidRequestException" occurs.</p>
fn cancel_audit_task( &self, @@ -18693,6 +19982,41 @@ impl Iot for IotClient { }) } + ///
<p>Defines an action that can be applied to audit findings by using StartAuditMitigationActionsTask. Each mitigation action can apply only one type of change.</p>
+ fn create_mitigation_action( + &self, + input: CreateMitigationActionRequest, + ) -> RusotoFuture { + let request_uri = format!( + "/mitigationactions/actions/{action_name}", + action_name = input.action_name + ); + + let mut request = SignedRequest::new("POST", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + let encoded = Some(serde_json::to_vec(&input).unwrap()); + request.set_payload(encoded); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(CreateMitigationActionError::from_response(response)) + }), + ) + } + }) + } + ///
<p>Creates an AWS IoT OTAUpdate on a target group of things or groups.</p>
fn create_ota_update( &self, @@ -19224,7 +20548,7 @@ impl Iot for IotClient { }) } - ///
<p>Deletes the specified certificate.</p> <p>A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.</p>
+ ///
<p>Deletes the specified certificate.</p> <p>A certificate cannot be deleted if it has a policy or IoT thing attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.</p>
fn delete_certificate( &self, input: DeleteCertificateRequest, @@ -19350,25 +20674,58 @@ impl Iot for IotClient { request.set_endpoint_prefix("iot".to_string()); - let mut params = Params::new(); - if let Some(ref x) = input.force { - params.put("force", x); - } - request.set_params(params); - + let mut params = Params::new(); + if let Some(ref x) = input.force { + params.put("force", x); + } + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = ::std::mem::drop(response); + + Ok(result) + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(DeleteJobExecutionError::from_response(response))), + ) + } + }) + } + + ///
<p>Deletes a defined mitigation action from your AWS account.</p>
+ fn delete_mitigation_action( + &self, + input: DeleteMitigationActionRequest, + ) -> RusotoFuture { + let request_uri = format!( + "/mitigationactions/actions/{action_name}", + action_name = input.action_name + ); + + let mut request = SignedRequest::new("DELETE", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + self.client.sign_and_dispatch(request, |response| { if response.status.is_success() { Box::new(response.buffer().from_err().and_then(|response| { - let result = ::std::mem::drop(response); + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; Ok(result) })) } else { Box::new( - response - .buffer() - .from_err() - .and_then(|response| Err(DeleteJobExecutionError::from_response(response))), + response.buffer().from_err().and_then(|response| { + Err(DeleteMitigationActionError::from_response(response)) + }), ) } }) @@ -19883,6 +21240,75 @@ impl Iot for IotClient { }) } + ///
<p>Gets information about a single audit finding. Properties include the reason for noncompliance, the severity of the issue, and when the audit that returned the finding was started.</p>
+ fn describe_audit_finding( + &self, + input: DescribeAuditFindingRequest, + ) -> RusotoFuture { + let request_uri = format!( + "/audit/findings/{finding_id}", + finding_id = input.finding_id + ); + + let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(DescribeAuditFindingError::from_response(response)) + }), + ) + } + }) + } + + ///
<p>Gets information about an audit mitigation task that is used to apply mitigation actions to a set of audit findings. Properties include the actions being applied, the audit checks to which they're being applied, the task status, and aggregated task statistics.</p>
+ fn describe_audit_mitigation_actions_task( + &self, + input: DescribeAuditMitigationActionsTaskRequest, + ) -> RusotoFuture< + DescribeAuditMitigationActionsTaskResponse, + DescribeAuditMitigationActionsTaskError, + > { + let request_uri = format!( + "/audit/mitigationactions/tasks/{task_id}", + task_id = input.task_id + ); + + let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeAuditMitigationActionsTaskError::from_response( + response, + )) + })) + } + }) + } + ///
<p>Gets information about a Device Defender audit.</p>
fn describe_audit_task( &self, @@ -20240,6 +21666,37 @@ impl Iot for IotClient { }) } + ///
<p>Gets information about a mitigation action.</p>
+ fn describe_mitigation_action( + &self, + input: DescribeMitigationActionRequest, + ) -> RusotoFuture { + let request_uri = format!( + "/mitigationactions/actions/{action_name}", + action_name = input.action_name + ); + + let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::()?; + + Ok(result) + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeMitigationActionError::from_response(response)) + })) + } + }) + } + ///
<p>Describes a role alias.</p>
fn describe_role_alias( &self, @@ -21166,6 +22623,105 @@ impl Iot for IotClient { }) } + ///
<p>Gets the status of audit mitigation action tasks that were executed.</p>
+ fn list_audit_mitigation_actions_executions( + &self, + input: ListAuditMitigationActionsExecutionsRequest, + ) -> RusotoFuture< + ListAuditMitigationActionsExecutionsResponse, + ListAuditMitigationActionsExecutionsError, + > { + let request_uri = "/audit/mitigationactions/executions"; + + let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri); + request.set_content_type("application/x-amz-json-1.1".to_owned()); + + request.set_endpoint_prefix("iot".to_string()); + + let mut params = Params::new(); + if let Some(ref x) = input.action_status { + params.put("actionStatus", x); + } + params.put("findingId", &input.finding_id); + if let Some(ref x) = input.max_results { + params.put("maxResults", x); + } + if let Some(ref x) = input.next_token { + params.put("nextToken", x); + } + params.put("taskId", &input.task_id); + request.set_params(params); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + let result = proto::json::ResponsePayload::new(&response) + .deserialize::( + )?; + + Ok(result) + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(ListAuditMitigationActionsExecutionsError::from_response( + response, + )) + })) + } + }) + } + + ///

+    /// <p>Gets a list of audit mitigation action tasks that match the specified filters.</p>
+    fn list_audit_mitigation_actions_tasks(
+        &self,
+        input: ListAuditMitigationActionsTasksRequest,
+    ) -> RusotoFuture<ListAuditMitigationActionsTasksResponse, ListAuditMitigationActionsTasksError>
+    {
+        let request_uri = "/audit/mitigationactions/tasks";
+
+        let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("iot".to_string());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.audit_task_id {
+            params.put("auditTaskId", x);
+        }
+        params.put("endTime", &input.end_time);
+        if let Some(ref x) = input.finding_id {
+            params.put("findingId", x);
+        }
+        if let Some(ref x) = input.max_results {
+            params.put("maxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("nextToken", x);
+        }
+        params.put("startTime", &input.start_time);
+        if let Some(ref x) = input.task_status {
+            params.put("taskStatus", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListAuditMitigationActionsTasksResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(ListAuditMitigationActionsTasksError::from_response(
+                        response,
+                    ))
+                }))
+            }
+        })
+    }
+
     /// <p>Lists the Device Defender audits that have been performed during a given time period.</p>
     fn list_audit_tasks(
         &self,
@@ -21603,6 +23159,48 @@ impl Iot for IotClient {
         })
     }
 
+    /// <p>Gets a list of all mitigation actions that match the specified filter criteria.</p>
+    fn list_mitigation_actions(
+        &self,
+        input: ListMitigationActionsRequest,
+    ) -> RusotoFuture<ListMitigationActionsResponse, ListMitigationActionsError> {
+        let request_uri = "/mitigationactions/actions";
+
+        let mut request = SignedRequest::new("GET", "execute-api", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("iot".to_string());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.action_type {
+            params.put("actionType", x);
+        }
+        if let Some(ref x) = input.max_results {
+            params.put("maxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("nextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListMitigationActionsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListMitigationActionsError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
     /// <p>Lists OTA updates.</p>
     fn list_ota_updates(
         &self,
@@ -22689,7 +24287,7 @@ impl Iot for IotClient {
         })
     }
 
-    /// <p>Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior or thing (device).</p>
+    /// <p>Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior, or thing (device).</p>
     fn list_violation_events(
         &self,
         input: ListViolationEventsRequest,
@@ -23157,6 +24755,42 @@ impl Iot for IotClient {
         })
     }
 
+    /// <p>Starts a task that applies a set of mitigation actions to the specified target.</p>
+    fn start_audit_mitigation_actions_task(
+        &self,
+        input: StartAuditMitigationActionsTaskRequest,
+    ) -> RusotoFuture<StartAuditMitigationActionsTaskResponse, StartAuditMitigationActionsTaskError>
+    {
+        let request_uri = format!(
+            "/audit/mitigationactions/tasks/{task_id}",
+            task_id = input.task_id
+        );
+
+        let mut request = SignedRequest::new("POST", "execute-api", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("iot".to_string());
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<StartAuditMitigationActionsTaskResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(StartAuditMitigationActionsTaskError::from_response(
+                        response,
+                    ))
+                }))
+            }
+        })
+    }
+
     /// <p>Starts an on-demand Device Defender audit.</p>
     fn start_on_demand_audit_task(
         &self,
@@ -23736,6 +25370,41 @@ impl Iot for IotClient {
         })
     }
 
+    /// <p>Updates the definition for the specified mitigation action.</p>
+    fn update_mitigation_action(
+        &self,
+        input: UpdateMitigationActionRequest,
+    ) -> RusotoFuture<UpdateMitigationActionResponse, UpdateMitigationActionError> {
+        let request_uri = format!(
+            "/mitigationactions/actions/{action_name}",
+            action_name = input.action_name
+        );
+
+        let mut request = SignedRequest::new("PATCH", "execute-api", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("iot".to_string());
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateMitigationActionResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(UpdateMitigationActionError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
     /// <p>Updates a role alias.</p>
     fn update_role_alias(
         &self,
@@ -23769,7 +25438,7 @@ impl Iot for IotClient {
         })
     }
 
-    /// <p>Updates a scheduled audit, including what checks are performed and how often the audit takes place.</p>
+    /// <p>Updates a scheduled audit, including which checks are performed and how often the audit takes place.</p>
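// Aside (not part of the diff): a minimal sketch of how a consumer might call
// one of the mitigation-action APIs added above. The region, the action-type
// value, and the blocking `.sync()` style are illustrative assumptions, not
// something this changeset prescribes.
use rusoto_core::Region;
use rusoto_iot::{Iot, IotClient, ListMitigationActionsRequest};

fn list_actions_example() {
    let client = IotClient::new(Region::UsEast1);
    let request = ListMitigationActionsRequest {
        // Filter to one action type; omit to list every mitigation action.
        action_type: Some("ENABLE_IOT_LOGGING".to_string()),
        ..Default::default()
    };
    match client.list_mitigation_actions(request).sync() {
        Ok(response) => println!("actions: {:?}", response.action_identifiers),
        Err(e) => eprintln!("list_mitigation_actions failed: {}", e),
    }
}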
fn update_scheduled_audit( &self, input: UpdateScheduledAuditRequest, diff --git a/rusoto/services/iot1click-devices/Cargo.toml b/rusoto/services/iot1click-devices/Cargo.toml index 223f9b47ba6..e83bbafba95 100644 --- a/rusoto/services/iot1click-devices/Cargo.toml +++ b/rusoto/services/iot1click-devices/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_iot1click_devices" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/iot1click-devices/README.md b/rusoto/services/iot1click-devices/README.md index 34e23489e85..0f43e742c6f 100644 --- a/rusoto/services/iot1click-devices/README.md +++ b/rusoto/services/iot1click-devices/README.md @@ -23,9 +23,16 @@ To use `rusoto_iot1click_devices` in your application, add it as a dependency in ```toml [dependencies] -rusoto_iot1click_devices = "0.40.0" +rusoto_iot1click_devices = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/iot1click-devices/src/custom/mod.rs b/rusoto/services/iot1click-devices/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/iot1click-devices/src/custom/mod.rs +++ b/rusoto/services/iot1click-devices/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/iot1click-devices/src/generated.rs b/rusoto/services/iot1click-devices/src/generated.rs index 689e5610106..906cd29b27c 100644 --- a/rusoto/services/iot1click-devices/src/generated.rs +++ b/rusoto/services/iot1click-devices/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Attributes {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -37,7 +36,7 @@ pub struct ClaimDevicesByClaimCodeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClaimDevicesByClaimCodeResponse { ///

The claim code provided by the device manufacturer.

#[serde(rename = "ClaimCode")] @@ -58,7 +57,7 @@ pub struct DescribeDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDeviceResponse { ///

Device details.

#[serde(rename = "DeviceDescription")] @@ -67,7 +66,7 @@ pub struct DescribeDeviceResponse { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Device { ///

The user-specified attributes associated with the device for an event.

#[serde(rename = "Attributes")] @@ -84,7 +83,7 @@ pub struct Device { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceDescription { ///

The ARN of the device.

#[serde(rename = "Arn")] @@ -119,7 +118,7 @@ pub struct DeviceDescription { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceEvent { ///

An object representing the device associated with the event.

#[serde(rename = "Device")] @@ -159,7 +158,7 @@ pub struct FinalizeDeviceClaimRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FinalizeDeviceClaimResponse { ///

The device's final claim state.

#[serde(rename = "State")] @@ -175,7 +174,7 @@ pub struct GetDeviceMethodsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeviceMethodsResponse { ///

List of available device APIs.

#[serde(rename = "DeviceMethods")] @@ -191,7 +190,7 @@ pub struct InitiateDeviceClaimRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateDeviceClaimResponse { ///

The device's final claim state.

#[serde(rename = "State")] @@ -215,7 +214,7 @@ pub struct InvokeDeviceMethodRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InvokeDeviceMethodResponse { ///

A JSON encoded string containing the device method response.

#[serde(rename = "DeviceMethodResponse")] @@ -250,7 +249,7 @@ pub struct ListDeviceEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDeviceEventsResponse { ///

An array of zero or more elements describing the event(s) associated with the device.

@@ -281,7 +280,7 @@ pub struct ListDevicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevicesResponse { ///

A list of devices.

#[serde(rename = "Devices")] @@ -301,7 +300,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

    /// <p>A collection of key/value pairs defining the resource tags. For example, {
    /// "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS
@@ -335,7 +334,7 @@ pub struct UnclaimDeviceRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UnclaimDeviceResponse {
     ///

The device's final claim state.

#[serde(rename = "State")] @@ -366,7 +365,7 @@ pub struct UpdateDeviceStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDeviceStateResponse {} /// Errors returned by ClaimDevicesByClaimCode @@ -1102,10 +1101,7 @@ impl Iot1ClickDevicesClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Iot1ClickDevicesClient { - Iot1ClickDevicesClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1119,10 +1115,14 @@ impl Iot1ClickDevicesClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Iot1ClickDevicesClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Iot1ClickDevicesClient { + Iot1ClickDevicesClient { client, region } } } diff --git a/rusoto/services/iot1click-projects/Cargo.toml b/rusoto/services/iot1click-projects/Cargo.toml index 331575ea644..70be19980a1 100644 --- a/rusoto/services/iot1click-projects/Cargo.toml +++ b/rusoto/services/iot1click-projects/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_iot1click_projects" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/iot1click-projects/README.md b/rusoto/services/iot1click-projects/README.md index 957f6fbf318..3c215bb5ddc 100644 --- a/rusoto/services/iot1click-projects/README.md +++ b/rusoto/services/iot1click-projects/README.md @@ -23,9 +23,16 @@ To use `rusoto_iot1click_projects` in your application, add it as a dependency i ```toml [dependencies] -rusoto_iot1click_projects = "0.40.0" +rusoto_iot1click_projects = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/iot1click-projects/src/custom/mod.rs b/rusoto/services/iot1click-projects/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/iot1click-projects/src/custom/mod.rs +++ b/rusoto/services/iot1click-projects/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/iot1click-projects/src/generated.rs b/rusoto/services/iot1click-projects/src/generated.rs index 5610cce5f32..7886ea074cc 100644 --- a/rusoto/services/iot1click-projects/src/generated.rs +++ b/rusoto/services/iot1click-projects/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -42,7 +41,7 @@ pub struct AssociateDeviceWithPlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDeviceWithPlacementResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -60,7 +59,7 @@ pub struct CreatePlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePlacementResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -83,7 +82,7 @@ pub struct CreateProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProjectResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -97,7 +96,7 @@ pub struct DeletePlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePlacementResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -108,7 +107,7 @@ pub struct DeleteProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProjectResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -122,7 +121,7 @@ pub struct DescribePlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePlacementResponse { ///

An object describing the placement.

#[serde(rename = "placement")] @@ -137,7 +136,7 @@ pub struct DescribeProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProjectResponse { ///

An object describing the project.

#[serde(rename = "project")] @@ -171,7 +170,7 @@ pub struct DisassociateDeviceFromPlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDeviceFromPlacementResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -185,7 +184,7 @@ pub struct GetDevicesInPlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDevicesInPlacementResponse { ///

An object containing the devices (zero or more) within the placement.

#[serde(rename = "devices")] @@ -208,7 +207,7 @@ pub struct ListPlacementsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPlacementsResponse { ///

The token used to retrieve the next set of results - will be effectively empty if there are no further results.

#[serde(rename = "nextToken")] @@ -232,7 +231,7 @@ pub struct ListProjectsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProjectsResponse { ///

The token used to retrieve the next set of results - will be effectively empty if there are no further results.

#[serde(rename = "nextToken")] @@ -251,7 +250,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The tags (metadata key/value pairs) which you have assigned to the resource.

#[serde(rename = "tags")] @@ -261,7 +260,7 @@ pub struct ListTagsForResourceResponse { ///

An object describing a project's placement.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PlacementDescription { ///

The user-defined attributes associated with the placement.

#[serde(rename = "attributes")] @@ -282,7 +281,7 @@ pub struct PlacementDescription { ///

An object providing summary information for a particular placement.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PlacementSummary { ///

The date when the placement was originally created, in UNIX epoch time format.

#[serde(rename = "createdDate")] @@ -313,7 +312,7 @@ pub struct PlacementTemplate { ///

An object providing detailed information for a particular project associated with an AWS account and region.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectDescription { ///

The ARN of the project.

#[serde(rename = "arn")] @@ -344,7 +343,7 @@ pub struct ProjectDescription { ///

An object providing summary information for a particular project for an associated AWS account and region.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectSummary { ///

The ARN of the project.

#[serde(rename = "arn")] @@ -376,7 +375,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -390,7 +389,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -408,7 +407,7 @@ pub struct UpdatePlacementRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePlacementResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -427,7 +426,7 @@ pub struct UpdateProjectRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProjectResponse {} /// Errors returned by AssociateDeviceWithPlacement @@ -1324,10 +1323,7 @@ impl Iot1ClickProjectsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Iot1ClickProjectsClient { - Iot1ClickProjectsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1341,10 +1337,14 @@ impl Iot1ClickProjectsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Iot1ClickProjectsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Iot1ClickProjectsClient { + Iot1ClickProjectsClient { client, region } } } diff --git a/rusoto/services/iotanalytics/Cargo.toml b/rusoto/services/iotanalytics/Cargo.toml index a362c391d45..6cc7166b191 100644 --- a/rusoto/services/iotanalytics/Cargo.toml +++ b/rusoto/services/iotanalytics/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_iotanalytics" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/iotanalytics/README.md b/rusoto/services/iotanalytics/README.md index d5bbb179e60..eb1662a251f 100644 --- a/rusoto/services/iotanalytics/README.md +++ b/rusoto/services/iotanalytics/README.md @@ -23,9 +23,16 @@ To use `rusoto_iotanalytics` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_iotanalytics = "0.40.0" +rusoto_iotanalytics = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. 
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/iotanalytics/src/custom/mod.rs b/rusoto/services/iotanalytics/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/iotanalytics/src/custom/mod.rs +++ b/rusoto/services/iotanalytics/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/iotanalytics/src/generated.rs b/rusoto/services/iotanalytics/src/generated.rs index fb5557d802f..27054e3f597 100644 --- a/rusoto/services/iotanalytics/src/generated.rs +++ b/rusoto/services/iotanalytics/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -42,7 +41,7 @@ pub struct AddAttributesActivity { ///

Contains information about errors.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchPutMessageErrorEntry { ///

The code associated with the error.

#[serde(rename = "errorCode")] @@ -69,7 +68,7 @@ pub struct BatchPutMessageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchPutMessageResponse { ///

A list of any errors encountered when sending the messages to the channel.

#[serde(rename = "batchPutMessageErrorEntries")] @@ -88,12 +87,12 @@ pub struct CancelPipelineReprocessingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelPipelineReprocessingResponse {} ///

A collection of data from an MQTT topic. Channels archive the raw, unprocessed messages before publishing the data to a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Channel { ///

The ARN of the channel.

#[serde(rename = "arn")] @@ -142,7 +141,7 @@ pub struct ChannelActivity { ///

Statistics information about the channel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChannelStatistics { ///

The estimated size of the channel.

#[serde(rename = "size")] @@ -165,7 +164,7 @@ pub struct ChannelStorage { ///

Where channel data is stored.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChannelStorageSummary { ///

Used to store channel data in an S3 bucket that you manage.

#[serde(rename = "customerManagedS3")] @@ -179,7 +178,7 @@ pub struct ChannelStorageSummary { ///

A summary of information about a channel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChannelSummary { ///

The name of the channel.

#[serde(rename = "channelName")] @@ -241,7 +240,7 @@ pub struct CreateChannelRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateChannelResponse { ///

The ARN of the channel.

#[serde(rename = "channelArn")] @@ -265,7 +264,7 @@ pub struct CreateDatasetContentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDatasetContentResponse { ///

The version ID of the data set contents which are being created.

#[serde(rename = "versionId")] @@ -304,7 +303,7 @@ pub struct CreateDatasetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDatasetResponse { ///

The ARN of the data set.

#[serde(rename = "datasetArn")] @@ -340,7 +339,7 @@ pub struct CreateDatastoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDatastoreResponse { ///

The ARN of the data store.

#[serde(rename = "datastoreArn")] @@ -371,7 +370,7 @@ pub struct CreatePipelineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePipelineResponse { ///

The ARN of the pipeline.

#[serde(rename = "pipelineArn")] @@ -400,7 +399,7 @@ pub struct CustomerManagedChannelS3Storage { ///

Used to store channel data in an S3 bucket that you manage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CustomerManagedChannelS3StorageSummary { ///

The name of the Amazon S3 bucket in which channel data is stored.

#[serde(rename = "bucket")] @@ -433,7 +432,7 @@ pub struct CustomerManagedDatastoreS3Storage { ///

Used to store data store data in an S3 bucket that you manage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CustomerManagedDatastoreS3StorageSummary { ///

The name of the Amazon S3 bucket in which data store data is stored.

#[serde(rename = "bucket")] @@ -451,7 +450,7 @@ pub struct CustomerManagedDatastoreS3StorageSummary { ///

Information about a data set.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Dataset { ///

The "DatasetAction" objects that automatically create the data set contents.

#[serde(rename = "actions")] @@ -514,7 +513,7 @@ pub struct DatasetAction { ///

Information about the action which automatically creates the data set's contents.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatasetActionSummary { ///

The name of the action which automatically creates the data set's contents.

#[serde(rename = "actionName")] @@ -553,7 +552,7 @@ pub struct DatasetContentDeliveryRule { ///

The state of the data set contents and the reason they are in this state.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatasetContentStatus { ///

The reason the data set contents are in this state.

#[serde(rename = "reason")] @@ -567,7 +566,7 @@ pub struct DatasetContentStatus { ///

Summary information about data set contents.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatasetContentSummary { ///

The actual time the creation of the data set contents was started.

#[serde(rename = "creationTime")] @@ -597,7 +596,7 @@ pub struct DatasetContentVersionValue { ///

The reference to a data set entry.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatasetEntry { ///

The pre-signed URI of the data set item.

#[serde(rename = "dataURI")] @@ -611,7 +610,7 @@ pub struct DatasetEntry { ///

A summary of information about a data set.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatasetSummary { ///

A list of "DataActionSummary" objects.

#[serde(rename = "actions")] @@ -654,7 +653,7 @@ pub struct DatasetTrigger { ///

Information about a data store.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Datastore { ///

The ARN of the data store.

#[serde(rename = "arn")] @@ -699,7 +698,7 @@ pub struct DatastoreActivity { ///

Statistical information about the data store.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatastoreStatistics { ///

The estimated size of the data store.

#[serde(rename = "size")] @@ -722,7 +721,7 @@ pub struct DatastoreStorage { ///

Where data store data is stored.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatastoreStorageSummary { ///

Used to store data store data in an S3 bucket that you manage.

#[serde(rename = "customerManagedS3")] @@ -736,7 +735,7 @@ pub struct DatastoreStorageSummary { ///

A summary of information about a data store.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DatastoreSummary { ///

When the data store was created.

#[serde(rename = "creationTime")] @@ -822,7 +821,7 @@ pub struct DescribeChannelRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeChannelResponse { ///

An object that contains information about the channel.

#[serde(rename = "channel")] @@ -842,7 +841,7 @@ pub struct DescribeDatasetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDatasetResponse { ///

An object that contains information about the data set.

#[serde(rename = "dataset")] @@ -862,7 +861,7 @@ pub struct DescribeDatastoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDatastoreResponse { ///

Information about the data store.

#[serde(rename = "datastore")] @@ -878,7 +877,7 @@ pub struct DescribeDatastoreResponse { pub struct DescribeLoggingOptionsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLoggingOptionsResponse { ///

The current settings of the AWS IoT Analytics logging options.

#[serde(rename = "loggingOptions")] @@ -894,7 +893,7 @@ pub struct DescribePipelineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePipelineResponse { ///

A "Pipeline" object that contains information about the pipeline.

#[serde(rename = "pipeline")] @@ -946,7 +945,7 @@ pub struct DeviceShadowEnrichActivity { ///

The estimated size of the resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EstimatedResourceSize { ///

The time when the estimate of the size of the resource was made.

#[serde(rename = "estimatedOn")] @@ -985,7 +984,7 @@ pub struct GetDatasetContentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDatasetContentResponse { ///

A list of "DatasetEntry" objects.

#[serde(rename = "entries")] @@ -1054,7 +1053,7 @@ pub struct ListChannelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListChannelsResponse { ///

A list of "ChannelSummary" objects.

#[serde(rename = "channelSummaries")] @@ -1090,7 +1089,7 @@ pub struct ListDatasetContentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDatasetContentsResponse { ///

Summary information about data set contents that have been created.

#[serde(rename = "datasetContentSummaries")] @@ -1115,7 +1114,7 @@ pub struct ListDatasetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDatasetsResponse { ///

A list of "DatasetSummary" objects.

#[serde(rename = "datasetSummaries")] @@ -1140,7 +1139,7 @@ pub struct ListDatastoresRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDatastoresResponse { ///

A list of "DatastoreSummary" objects.

#[serde(rename = "datastoreSummaries")] @@ -1165,7 +1164,7 @@ pub struct ListPipelinesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPipelinesResponse { ///

The token to retrieve the next set of results, or null if there are no more results.

#[serde(rename = "nextToken")] @@ -1185,7 +1184,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The tags (metadata) which you have assigned to the resource.

#[serde(rename = "tags")] @@ -1251,7 +1250,7 @@ pub struct OutputFileUriValue { ///

Contains information about a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Pipeline { ///

The activities that perform transformations on the messages.

#[serde(rename = "activities")] @@ -1326,7 +1325,7 @@ pub struct PipelineActivity { ///

A summary of information about a pipeline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PipelineSummary { ///

When the pipeline was created.

#[serde(rename = "creationTime")] @@ -1379,7 +1378,7 @@ pub struct RemoveAttributesActivity { ///

Information about pipeline reprocessing.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReprocessingSummary { ///

The time the pipeline reprocessing was created.

#[serde(rename = "creationTime")] @@ -1435,7 +1434,7 @@ pub struct RunPipelineActivityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RunPipelineActivityResponse { ///

In case the pipeline activity fails, the log message that is generated.

#[serde(rename = "logResult")] @@ -1490,7 +1489,7 @@ pub struct SampleChannelDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SampleChannelDataResponse { ///

The list of message samples. Each sample message is returned as a base64-encoded string.

#[serde(rename = "payloads")] @@ -1533,7 +1532,7 @@ pub struct ServiceManagedChannelS3Storage {} ///

Used to store channel data in an S3 bucket managed by the AWS IoT Analytics service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceManagedChannelS3StorageSummary {} ///

Use this to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

@@ -1542,7 +1541,7 @@ pub struct ServiceManagedDatastoreS3Storage {} ///

Used to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceManagedDatastoreS3StorageSummary {} ///

The SQL query to modify the message.

@@ -1573,7 +1572,7 @@ pub struct StartPipelineReprocessingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartPipelineReprocessingResponse { ///

The ID of the pipeline reprocessing activity that was started.

#[serde(rename = "reprocessingId")] @@ -1603,7 +1602,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} ///

Information about the data set whose content generation triggers the new data set content generation.

@@ -1625,7 +1624,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3958,10 +3957,7 @@ impl IotAnalyticsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> IotAnalyticsClient { - IotAnalyticsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3975,10 +3971,14 @@ impl IotAnalyticsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - IotAnalyticsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> IotAnalyticsClient { + IotAnalyticsClient { client, region } } } diff --git a/rusoto/services/kafka/Cargo.toml b/rusoto/services/kafka/Cargo.toml index 20e690d1e0d..765459f4887 100644 --- a/rusoto/services/kafka/Cargo.toml +++ b/rusoto/services/kafka/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_kafka" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/kafka/README.md b/rusoto/services/kafka/README.md index 0efae43870d..db24f0dfdac 100644 --- a/rusoto/services/kafka/README.md +++ b/rusoto/services/kafka/README.md @@ -23,9 +23,16 @@ To use `rusoto_kafka` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_kafka = "0.40.0" +rusoto_kafka = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/kafka/src/custom/mod.rs b/rusoto/services/kafka/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/kafka/src/custom/mod.rs +++ b/rusoto/services/kafka/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/kafka/src/generated.rs b/rusoto/services/kafka/src/generated.rs index 99da13ddd28..7de219efa31 100644 --- a/rusoto/services/kafka/src/generated.rs +++ b/rusoto/services/kafka/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -73,7 +72,7 @@ pub struct BrokerNodeGroupInfo { ///
 ///         <p>BrokerNodeInfo</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerNodeInfo { ///
     ///         <p>The attached elastic network interface of the broker.</p>
     /// 
@@ -110,7 +109,7 @@ pub struct BrokerNodeInfo { ///
 ///         <p>Information about the current software installed on the cluster.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerSoftwareInfo { ///
     ///         <p>The Amazon Resource Name (ARN) of the configuration used for the cluster. This field isn't visible in this preview release.</p>
     /// 
@@ -143,7 +142,7 @@ pub struct ClientAuthentication { ///
 ///         <p>Returns information about a cluster.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterInfo { ///
     ///         <p>Arn of active cluster operation.</p>
     /// 
@@ -220,7 +219,7 @@ pub struct ClusterInfo { ///
 ///         <p>Returns information about a cluster operation.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterOperationInfo { ///
     ///         <p>The ID of the API request that triggered this operation.</p>
     /// 
@@ -277,7 +276,7 @@ pub struct ClusterOperationInfo { ///
 ///         <p>Represents an MSK Configuration.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Configuration { ///
     ///         <p>The Amazon Resource Name (ARN) of the configuration.</p>
     /// 
@@ -322,7 +321,7 @@ pub struct ConfigurationInfo { ///
 ///         <p>Describes a configuration revision.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfigurationRevision { ///
     ///         <p>The time when the configuration revision was created.</p>
     /// 
@@ -385,7 +384,7 @@ pub struct CreateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResponse { ///
     ///         <p>The Amazon Resource Name (ARN) of the cluster.</p>
     /// 
@@ -432,7 +431,7 @@ pub struct CreateConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConfigurationResponse { ///
     ///         <p>The Amazon Resource Name (ARN) of the configuration.</p>
     /// 
@@ -470,7 +469,7 @@ pub struct DeleteClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteClusterResponse { ///
     ///         <p>The Amazon Resource Name (ARN) of the cluster.</p>
     /// 
@@ -493,7 +492,7 @@ pub struct DescribeClusterOperationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClusterOperationResponse { ///
     ///         <p>Cluster operation information</p>
     /// 
@@ -511,7 +510,7 @@ pub struct DescribeClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClusterResponse { ///
     ///         <p>The cluster information.</p>
     /// 
@@ -529,7 +528,7 @@ pub struct DescribeConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConfigurationResponse { ///
     ///         <p>The Amazon Resource Name (ARN) of the configuration.</p>
     /// 
@@ -576,7 +575,7 @@ pub struct DescribeConfigurationRevisionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConfigurationRevisionResponse { ///
     ///         <p>The Amazon Resource Name (ARN) of the configuration.</p>
     /// 
@@ -675,7 +674,7 @@ pub struct EncryptionInfo { ///
 ///         <p>Returns information about an error state of the cluster.</p>
 /// 
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorInfo { ///
     ///         <p>A number describing the error programmatically.</p>
     /// 
@@ -698,7 +697,7 @@ pub struct GetBootstrapBrokersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBootstrapBrokersResponse { ///
     ///         <p>A string containing one or more hostname:port pairs.</p>
     /// 
@@ -732,7 +731,7 @@ pub struct ListClusterOperationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClusterOperationsResponse { ///
     ///         <p>An array of cluster operation information objects.</p>
     /// 
@@ -767,7 +766,7 @@ pub struct ListClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClustersResponse { ///
     ///         <p>Information on each of the MSK clusters in the response.</p>
     /// 
@@ -802,7 +801,7 @@ pub struct ListConfigurationRevisionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConfigurationRevisionsResponse { ///
     ///         <p>Paginated results marker.</p>
     /// 
@@ -832,7 +831,7 @@ pub struct ListConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConfigurationsResponse { ///
     ///         <p>An array of MSK configurations.</p>
     /// 
@@ -867,7 +866,7 @@ pub struct ListNodesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNodesResponse { ///
     ///         <p>The paginated results marker. When the result of a ListNodes operation is truncated, the call returns NextToken in the response.
     /// To get another batch of nodes, provide this token in your next request.</p>
@@ -891,7 +890,7 @@ pub struct ListTagsForResourceRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForResourceResponse {
     /// <p>The key-value pair for the resource tag.</p>
@@ -903,7 +902,7 @@ pub struct ListTagsForResourceResponse {
 /// <p>Information about cluster attributes that can be updated via update APIs.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MutableClusterInfo {
     /// <p>Specifies the size of the EBS volume and the ID of the associated broker.</p>
@@ -925,7 +924,7 @@ pub struct MutableClusterInfo {
 /// <p>The node information object.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NodeInfo {
     /// <p>The start time.</p>
@@ -1041,7 +1040,7 @@ pub struct UpdateBrokerStorageRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateBrokerStorageResponse {
     /// <p>The Amazon Resource Name (ARN) of the cluster.</p>
@@ -1072,7 +1071,7 @@ pub struct UpdateClusterConfigurationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateClusterConfigurationResponse {
     /// <p>The Amazon Resource Name (ARN) of the cluster.</p>
@@ -1089,7 +1088,7 @@ pub struct UpdateClusterConfigurationResponse {
 /// <p>Zookeeper node information.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ZookeeperNodeInfo {
     /// <p>The attached elastic network interface of the broker.</p>
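The `serialize_structs` gates above are what the new Cargo feature toggles: outside of tests, response structs only implement `Serialize` when the feature is enabled. A minimal sketch of what that unlocks for a consumer — only the feature name and the `derive(Serialize)` gating come from this diff; the dependency block and function are illustrative:

```rust
// Hypothetical consumer Cargo.toml entry enabling the feature:
// rusoto_kafka = { version = "0.41.0", features = ["serialize_structs"] }

use rusoto_kafka::ListClustersResponse;

/// Serialize a response for logging or snapshotting. This compiles only when
/// the crate is built with `serialize_structs` (or inside the crate's own
/// tests), since that is when `derive(Serialize)` is attached to the struct.
fn dump(resp: &ListClustersResponse) -> serde_json::Result<String> {
    serde_json::to_string_pretty(resp)
}
```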
@@ -2418,10 +2417,7 @@ impl KafkaClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> KafkaClient {
-        KafkaClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -2435,10 +2431,14 @@ impl KafkaClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        KafkaClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> KafkaClient {
+        KafkaClient { client, region }
     }
 }
diff --git a/rusoto/services/kinesis-video-archived-media/Cargo.toml b/rusoto/services/kinesis-video-archived-media/Cargo.toml
index a2fd04a96b7..9c6f3f1522b 100644
--- a/rusoto/services/kinesis-video-archived-media/Cargo.toml
+++ b/rusoto/services/kinesis-video-archived-media/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_kinesis_video_archived_media"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/kinesis-video-archived-media/README.md b/rusoto/services/kinesis-video-archived-media/README.md
index 73ffa513333..8303436517f 100644
--- a/rusoto/services/kinesis-video-archived-media/README.md
+++ b/rusoto/services/kinesis-video-archived-media/README.md
@@ -23,9 +23,16 @@ To use `rusoto_kinesis_video_archived_media` in your application, add it as a dependency in your `Cargo.toml`:
 
 ```toml
 [dependencies]
-rusoto_kinesis_video_archived_media = "0.40.0"
+rusoto_kinesis_video_archived_media = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kinesis-video-archived-media/src/custom/mod.rs b/rusoto/services/kinesis-video-archived-media/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kinesis-video-archived-media/src/custom/mod.rs
+++ b/rusoto/services/kinesis-video-archived-media/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kinesis-video-archived-media/src/generated.rs b/rusoto/services/kinesis-video-archived-media/src/generated.rs
index ba3ee60adde..74ccf6748ec 100644
--- a/rusoto/services/kinesis-video-archived-media/src/generated.rs
+++ b/rusoto/services/kinesis-video-archived-media/src/generated.rs
@@ -9,30 +9,55 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
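Both Kafka constructors now funnel through `new_with_client`, which also becomes a public entry point for callers that want to supply their own `rusoto_core::Client`. A sketch of the new constructor in use (the region choice is arbitrary):

```rust
use rusoto_core::{Client, Region};
use rusoto_kafka::KafkaClient;

fn build_client() -> KafkaClient {
    // Passing the shared Client explicitly is equivalent to KafkaClient::new,
    // but the same entry point accepts any preconfigured Client.
    KafkaClient::new_with_client(Client::shared(), Region::UsEast1)
}
```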

+/// Contains the range of timestamps for the requested media, and the source of the timestamps.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DASHFragmentSelector {
+    /// The source of the timestamps for the requested media.
+    ///
+    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetDASHStreamingSessionURLInput$PlaybackMode is ON_DEMAND or LIVE_REPLAY, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange ingested immediately following the first fragment (up to the GetDASHStreamingSessionURLInput$MaxManifestFragmentResults value) are included.
+    ///
+    /// Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the MPEG-DASH manifest will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.
+    ///
+    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetDASHStreamingSessionURLInput$PlaybackMode is LIVE, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the MPEG-DASH manifest. This means that even if fragments ingested in the past have producer timestamps with values now, they are not included in the MPEG-DASH manifest.
+    ///
+    /// The default is SERVER_TIMESTAMP.
+    #[serde(rename = "FragmentSelectorType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fragment_selector_type: Option<String>,
+    /// The start and end of the timestamp range for the requested media.
+    ///
+    /// This value should not be present if PlaybackType is LIVE.
+    #[serde(rename = "TimestampRange")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub timestamp_range: Option<DASHTimestampRange>,
+}
+

+/// The start and end of the timestamp range for the requested media.
+///
+/// This value should not be present if PlaybackType is LIVE.
+///
+/// The values in the DASHTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DASHTimestampRange {
+    /// The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.
+    ///
+    /// If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.
+    ///
+    /// The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.
+    ///
+    /// This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
+    #[serde(rename = "EndTimestamp")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end_timestamp: Option<f64>,
+    /// The start of the timestamp range for the requested media.
+    ///
+    /// If the DASHTimestampRange value is specified, the StartTimestamp value is required.
+    ///
+    /// This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
+    #[serde(rename = "StartTimestamp")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start_timestamp: Option<f64>,
+}
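Taken together, the two structs above describe a playback window. A sketch of filling them for an on-demand request — the epoch-second values are placeholders; the diff models both timestamps as `f64`:

```rust
use rusoto_kinesis_video_archived_media::{DASHFragmentSelector, DASHTimestampRange};

fn on_demand_selector() -> DASHFragmentSelector {
    DASHFragmentSelector {
        // PRODUCER_TIMESTAMP selects fragments by the producer-side clock;
        // the default is SERVER_TIMESTAMP.
        fragment_selector_type: Some("PRODUCER_TIMESTAMP".to_string()),
        timestamp_range: Some(DASHTimestampRange {
            // Placeholder epoch seconds; EndTimestamp must be later than
            // StartTimestamp and within 3 hours of it.
            start_timestamp: Some(1_566_000_000.0),
            end_timestamp: Some(1_566_003_600.0),
        }),
    }
}
```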

 /// Represents a segment of video or other time-delimited data.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Fragment {
     /// The playback duration or other time value associated with the fragment.
     #[serde(rename = "FragmentLengthInMilliseconds")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub fragment_length_in_milliseconds: Option<i64>,
-    /// The index value of the fragment.
+    /// The unique identifier of the fragment. This value monotonically increases based on the ingestion order.
     #[serde(rename = "FragmentNumber")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub fragment_number: Option<String>,
@@ -61,6 +86,51 @@ pub struct FragmentSelector {
     pub timestamp_range: TimestampRange,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetDASHStreamingSessionURLInput {

+    /// The time range of the requested fragment, and the source of the timestamps.
+    ///
+    /// This parameter is required if PlaybackMode is ON_DEMAND or LIVE_REPLAY. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND or LIVE_REPLAY, both FragmentSelectorType and TimestampRange must be set.
+    #[serde(rename = "DASHFragmentSelector")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dash_fragment_selector: Option<DASHFragmentSelector>,
+    /// Fragments are identified in the manifest file based on their sequence number in the session. If DisplayFragmentNumber is set to ALWAYS, the Kinesis Video Streams fragment number is added to each S element in the manifest file with the attribute name “kvs:fn”. These fragment numbers can be used for logging or for use with other APIs (e.g. GetMedia and GetMediaForFragmentList). A custom MPEG-DASH media player is necessary to leverage this custom attribute.
+    ///
+    /// The default value is NEVER.
+    #[serde(rename = "DisplayFragmentNumber")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub display_fragment_number: Option<String>,
+    /// Per the MPEG-DASH specification, the wall-clock time of fragments in the manifest file can be derived using attributes in the manifest itself. However, typically, MPEG-DASH compatible media players do not properly handle gaps in the media timeline. Kinesis Video Streams adjusts the media timeline in the manifest file to enable playback of media with discontinuities. Therefore, the wall-clock time derived from the manifest file may be inaccurate. If DisplayFragmentTimestamp is set to ALWAYS, the accurate fragment timestamp is added to each S element in the manifest file with the attribute name “kvs:ts”. A custom MPEG-DASH media player is necessary to leverage this custom attribute.
+    ///
+    /// The default value is NEVER. When DASHFragmentSelector is SERVER_TIMESTAMP, the timestamps will be the server start timestamps. Similarly, when DASHFragmentSelector is PRODUCER_TIMESTAMP, the timestamps will be the producer start timestamps.
+    #[serde(rename = "DisplayFragmentTimestamp")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub display_fragment_timestamp: Option<String>,
+    /// The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).
+    ///
+    /// When a session expires, no new calls to GetDashManifest, GetMP4InitFragment, or GetMP4MediaFragment can be made for that session.
+    ///
+    /// The default is 300 (5 minutes).
+    #[serde(rename = "Expires")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expires: Option<i64>,
+    /// The maximum number of fragments that are returned in the MPEG-DASH manifest.
+    ///
+    /// When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
+    ///
+    /// When there are a higher number of fragments available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.
+    ///
+    /// The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.
+    ///
+    /// The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
+    #[serde(rename = "MaxManifestFragmentResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_manifest_fragment_results: Option<i64>,
+    /// Whether to retrieve live, live replay, or archived, on-demand data.
+    ///
+    /// Features of the three types of sessions include the following:
+    ///
+    ///   • LIVE : For sessions of this type, the MPEG-DASH manifest is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new manifest on a one-second interval. When this type of session is played in a media player, the user interface typically displays a "live" notification, with no scrubber control for choosing the position in the playback window to display.
+    ///
+    ///     In LIVE mode, the newest available fragments are included in an MPEG-DASH manifest, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the MPEG-DASH manifest if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the manifest, the older fragment is not added, and the gap is not filled.
+    ///
+    ///   • LIVE_REPLAY : For sessions of this type, the MPEG-DASH manifest is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the manifest every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
+    ///
+    ///   • ON_DEMAND : For sessions of this type, the MPEG-DASH manifest contains all the fragments for the session, up to the number that is specified in MaxManifestFragmentResults. The manifest must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
+    ///
+    /// In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.
+    ///
+    /// The default is LIVE.
+    #[serde(rename = "PlaybackMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub playback_mode: Option<String>,
+    /// The Amazon Resource Name (ARN) of the stream for which to retrieve the MPEG-DASH manifest URL.
+    ///
+    /// You must specify either the StreamName or the StreamARN.
+    #[serde(rename = "StreamARN")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stream_arn: Option<String>,
+    /// The name of the stream for which to retrieve the MPEG-DASH manifest URL.
+    ///
+    /// You must specify either the StreamName or the StreamARN.
+    #[serde(rename = "StreamName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stream_name: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetDASHStreamingSessionURLOutput {

+    /// The URL (containing the session token) that a media player can use to retrieve the MPEG-DASH manifest.
+    #[serde(rename = "DASHStreamingSessionURL")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dash_streaming_session_url: Option<String>,
+}
+
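Putting the new input struct together: a sketch that fills only the fields this diff introduces, relying on the derived `Default` for the rest (the stream name is a placeholder):

```rust
use rusoto_kinesis_video_archived_media::{
    DASHFragmentSelector, GetDASHStreamingSessionURLInput,
};

fn live_request() -> GetDASHStreamingSessionURLInput {
    GetDASHStreamingSessionURLInput {
        stream_name: Some("my-stream".to_string()), // placeholder name
        playback_mode: Some("LIVE".to_string()),
        // In LIVE mode only the selector type may be set; the docs say
        // TimestampRange should not be present, so it stays None.
        dash_fragment_selector: Some(DASHFragmentSelector {
            fragment_selector_type: Some("SERVER_TIMESTAMP".to_string()),
            timestamp_range: None,
        }),
        ..Default::default()
    }
}
```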

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct GetHLSStreamingSessionURLInput {
     /// Specifies which format should be used for packaging the media. Specifying the FRAGMENTED_MP4 container format packages the media into MP4 fragments (fMP4 or CMAF). This is the recommended packaging because there is minimal packaging overhead. The other container format option is MPEG_TS. HLS has supported MPEG TS chunks since it was released and is sometimes the only supported packaging on older HLS players. MPEG TS typically has a 5-25 percent packaging overhead. This means MPEG TS typically requires 5-25 percent more bandwidth and cost than fMP4.
     ///
     /// The default is FRAGMENTED_MP4.
@@ -75,19 +145,19 @@ pub struct GetHLSStreamingSessionURLInput {
     #[serde(rename = "DisplayFragmentTimestamp")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub display_fragment_timestamp: Option<String>,
-    /// The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).
-    ///
-    /// When a session expires, no new calls to GetHLSMasterPlaylist, GetHLSMediaPlaylist, GetMP4InitFragment, or GetMP4MediaFragment can be made for that session.
-    ///
-    /// The default is 300 (5 minutes).
+    /// The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).
+    ///
+    /// When a session expires, no new calls to GetHLSMasterPlaylist, GetHLSMediaPlaylist, GetMP4InitFragment, GetMP4MediaFragment, or GetTSFragment can be made for that session.
+    ///
+    /// The default is 300 (5 minutes).
     #[serde(rename = "Expires")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expires: Option<i64>,
-    /// The time range of the requested fragment, and the source of the timestamps.
-    ///
-    /// This parameter is required if PlaybackMode is ON_DEMAND. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND, both FragmentSelectorType and TimestampRange must be set.
+    /// The time range of the requested fragment, and the source of the timestamps.
+    ///
+    /// This parameter is required if PlaybackMode is ON_DEMAND or LIVE_REPLAY. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND or LIVE_REPLAY, both FragmentSelectorType and TimestampRange must be set.
     #[serde(rename = "HLSFragmentSelector")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub hls_fragment_selector: Option<HLSFragmentSelector>,
-    /// The maximum number of fragments that are returned in the HLS media playlists.
-    ///
-    /// When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
-    ///
-    /// When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
-    ///
-    /// The default is 5 fragments if PlaybackMode is LIVE, and 1,000 if PlaybackMode is ON_DEMAND.
-    ///
-    /// The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
+    /// The maximum number of fragments that are returned in the HLS media playlists.
+    ///
+    /// When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
+    ///
+    /// When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
+    ///
+    /// The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.
+    ///
+    /// The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
     #[serde(rename = "MaxMediaPlaylistFragmentResults")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub max_media_playlist_fragment_results: Option<i64>,
-    /// Whether to retrieve live or archived, on-demand data.
-    ///
-    /// Features of the two types of session include the following:
-    ///
-    ///   • LIVE : For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a "live" notification, with no scrubber control for choosing the position in the playback window to display.
-    ///
-    ///     In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
-    ///
-    ///   • ON_DEMAND : For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
-    ///
-    /// In both playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
-    ///
-    /// The default is LIVE.
+    /// Whether to retrieve live, live replay, or archived, on-demand data.
+    ///
+    /// Features of the three types of sessions include the following:
+    ///
+    ///   • LIVE : For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a "live" notification, with no scrubber control for choosing the position in the playback window to display.
+    ///
+    ///     In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
+    ///
+    ///   • LIVE_REPLAY : For sessions of this type, the HLS media playlist is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the media playlist every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
+    ///
+    ///   • ON_DEMAND : For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
+    ///
+    /// In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
+    ///
+    /// The default is LIVE.
     #[serde(rename = "PlaybackMode")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub playback_mode: Option<String>,
@@ -102,7 +172,7 @@ pub struct GetHLSStreamingSessionURLInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetHLSStreamingSessionURLOutput {

     /// The URL (containing the session token) that a media player can use to retrieve the HLS master playlist.
     #[serde(rename = "HLSStreamingSessionURL")]
@@ -131,7 +201,7 @@ pub struct GetMediaForFragmentListOutput {
 /// Contains the range of timestamps for the requested media, and the source of the timestamps.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct HLSFragmentSelector {
-    /// The source of the timestamps for the requested media.
-    ///
-    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.
-    ///
-    /// Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.
-    ///
-    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer timestamps with values now, they are not included in the HLS media playlist.
-    ///
-    /// The default is SERVER_TIMESTAMP.
+    /// The source of the timestamps for the requested media.
+    ///
+    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND or LIVE_REPLAY, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.
+    ///
+    /// Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.
+    ///
+    /// When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer timestamps with values now, they are not included in the HLS media playlist.
+    ///
+    /// The default is SERVER_TIMESTAMP.
     #[serde(rename = "FragmentSelectorType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub fragment_selector_type: Option<String>,
@@ -144,7 +214,7 @@ pub struct HLSFragmentSelector {
 /// The start and end of the timestamp range for the requested media.
 ///
 /// This value should not be present if PlaybackType is LIVE.
 ///
 /// The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct HLSTimestampRange {
-    /// The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.
-    ///
-    /// If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.
-    ///
-    /// If the HLSTimestampRange value is specified, the EndTimestamp value is required.
-    ///
-    /// This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
+    /// The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.
+    ///
+    /// If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.
+    ///
+    /// The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.
+    ///
+    /// This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
     #[serde(rename = "EndTimestamp")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub end_timestamp: Option<f64>,
@@ -174,7 +244,7 @@ pub struct ListFragmentsInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListFragmentsOutput {
     /// A list of archived Fragment objects from the stream that meet the selector criteria. Results are in no specific order, even across pages.
     #[serde(rename = "Fragments")]
@@ -197,6 +267,99 @@ pub struct TimestampRange {
     pub start_timestamp: f64,
 }
 
+/// Errors returned by GetDASHStreamingSessionURL
+#[derive(Debug, PartialEq)]
+pub enum GetDASHStreamingSessionURLError {

+    /// Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client calls. Try making the call later.
+    ClientLimitExceeded(String),
+    /// A specified parameter exceeds its restrictions, is not supported, or can't be used.
+    InvalidArgument(String),
+    /// The codec private data in at least one of the tracks of the video stream is not valid for this operation.
+    InvalidCodecPrivateData(String),
+    /// No codec private data was found in at least one of tracks of the video stream.
+    MissingCodecPrivateData(String),
+    /// A streaming session was requested for a stream that does not retain data (that is, has a DataRetentionInHours of 0).
+    NoDataRetention(String),
+    /// Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token has expired.
+    NotAuthorized(String),
+    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
+    ///
+    /// GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.
+    ResourceNotFound(String),
+    /// The type of the media (for example, h.264 or h.265 video or AAC or G.711 audio) could not be determined from the codec IDs of the tracks in the first fragment for a playback session. The codec ID for track 1 should be V_MPEG/ISO/AVC and, optionally, the codec ID for track 2 should be A_AAC.

+    UnsupportedStreamMediaType(String),
+}
+
+impl GetDASHStreamingSessionURLError {
+    pub fn from_response(
+        res: BufferedHttpResponse,
+    ) -> RusotoError<GetDASHStreamingSessionURLError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ClientLimitExceededException" => {
+                    return RusotoError::Service(
+                        GetDASHStreamingSessionURLError::ClientLimitExceeded(err.msg),
+                    )
+                }
+                "InvalidArgumentException" => {
+                    return RusotoError::Service(GetDASHStreamingSessionURLError::InvalidArgument(
+                        err.msg,
+                    ))
+                }
+                "InvalidCodecPrivateDataException" => {
+                    return RusotoError::Service(
+                        GetDASHStreamingSessionURLError::InvalidCodecPrivateData(err.msg),
+                    )
+                }
+                "MissingCodecPrivateDataException" => {
+                    return RusotoError::Service(
+                        GetDASHStreamingSessionURLError::MissingCodecPrivateData(err.msg),
+                    )
+                }
+                "NoDataRetentionException" => {
+                    return RusotoError::Service(GetDASHStreamingSessionURLError::NoDataRetention(
+                        err.msg,
+                    ))
+                }
+                "NotAuthorizedException" => {
+                    return RusotoError::Service(GetDASHStreamingSessionURLError::NotAuthorized(
+                        err.msg,
+                    ))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(GetDASHStreamingSessionURLError::ResourceNotFound(
+                        err.msg,
+                    ))
+                }
+                "UnsupportedStreamMediaTypeException" => {
+                    return RusotoError::Service(
+                        GetDASHStreamingSessionURLError::UnsupportedStreamMediaType(err.msg),
+                    )
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetDASHStreamingSessionURLError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetDASHStreamingSessionURLError {
+    fn description(&self) -> &str {
+        match *self {
+            GetDASHStreamingSessionURLError::ClientLimitExceeded(ref cause) => cause,
+            GetDASHStreamingSessionURLError::InvalidArgument(ref cause) => cause,
+            GetDASHStreamingSessionURLError::InvalidCodecPrivateData(ref cause) => cause,
+            GetDASHStreamingSessionURLError::MissingCodecPrivateData(ref cause) => cause,
+            GetDASHStreamingSessionURLError::NoDataRetention(ref cause) => cause,
+            GetDASHStreamingSessionURLError::NotAuthorized(ref cause) => cause,
+            GetDASHStreamingSessionURLError::ResourceNotFound(ref cause) => cause,
+            GetDASHStreamingSessionURLError::UnsupportedStreamMediaType(ref cause) => cause,
+        }
+    }
+}
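The `from_response` mapping above surfaces service faults as typed variants. A sketch of inspecting them at a call site — the retry classification is illustrative, derived from the variant docs, not something the crate prescribes:

```rust
use rusoto_core::RusotoError;
use rusoto_kinesis_video_archived_media::GetDASHStreamingSessionURLError;

fn is_retryable(err: &RusotoError<GetDASHStreamingSessionURLError>) -> bool {
    match err {
        // Throttling is the one service fault the docs say to retry later.
        RusotoError::Service(GetDASHStreamingSessionURLError::ClientLimitExceeded(_)) => true,
        // Missing streams/fragments, bad codec data, etc. will not heal on retry.
        RusotoError::Service(_) => false,
        // Transport-level problems may be transient.
        RusotoError::HttpDispatch(_) => true,
        _ => false,
    }
}
```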

 /// Errors returned by GetHLSStreamingSessionURL
 #[derive(Debug, PartialEq)]
 pub enum GetHLSStreamingSessionURLError {
@@ -208,13 +371,13 @@ pub enum GetHLSStreamingSessionURLError {
     InvalidCodecPrivateData(String),
     /// No codec private data was found in at least one of tracks of the video stream.
     MissingCodecPrivateData(String),
-    /// A PlaybackMode of ON_DEMAND was requested for a stream that does not retain data (that is, has a DataRetentionInHours of 0).
+    /// A streaming session was requested for a stream that does not retain data (that is, has a DataRetentionInHours of 0).
     NoDataRetention(String),
     /// Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token has expired.
     NotAuthorized(String),
-    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
-    ///
-    /// GetHLSStreamingSessionURL throws this error if a session with a PlaybackMode of ON_DEMAND is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.
+    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
+    ///
+    /// GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.
     ResourceNotFound(String),
-    /// The type of the media (for example, h.264 video or ACC audio) could not be determined from the codec IDs of the tracks in the first fragment for a playback session. The codec ID for track 1 should be V_MPEG/ISO/AVC and, optionally, the codec ID for track 2 should be A_AAC.
+    /// The type of the media (for example, h.264 or h.265 video or AAC or G.711 audio) could not be determined from the codec IDs of the tracks in the first fragment for a playback session. The codec ID for track 1 should be V_MPEG/ISO/AVC and, optionally, the codec ID for track 2 should be A_AAC.
     UnsupportedStreamMediaType(String),
 }
 
@@ -297,7 +460,7 @@ pub enum GetMediaForFragmentListError {
     InvalidArgument(String),
     /// Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token has expired.
     NotAuthorized(String),
-    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
-    ///
-    /// GetHLSStreamingSessionURL throws this error if a session with a PlaybackMode of ON_DEMAND is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.
+    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
+    ///
+    /// GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

     ResourceNotFound(String),
 }
 
@@ -356,7 +519,7 @@ pub enum ListFragmentsError {
     InvalidArgument(String),
     /// Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token has expired.
     NotAuthorized(String),
-    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
-    ///
-    /// GetHLSStreamingSessionURL throws this error if a session with a PlaybackMode of ON_DEMAND is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.
+    /// GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.
+    ///
+    /// GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

     ResourceNotFound(String),
 }
 
@@ -400,19 +563,25 @@ impl Error for ListFragmentsError {
 }
 
 /// Trait representing the capabilities of the Kinesis Video Archived Media API. Kinesis Video Archived Media clients implement this trait.
 pub trait KinesisVideoArchivedMedia {
-    /// Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.
-    ///
-    /// You must specify either the StreamName or the StreamARN.
-    ///
-    /// An Amazon Kinesis video stream has the following requirements for providing data through HLS:
-    ///
-    ///   • The media must contain h.264 encoded video and, optionally, AAC encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC. Optionally, the codec id of track 2 should be A_AAC.
-    ///
-    ///   • Data retention must be greater than 0.
-    ///
-    ///   • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.
-    ///
-    ///   • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).
-    ///
-    /// Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.
-    ///
-    /// The following procedure shows how to use HLS with Kinesis Video Streams:
-    ///
-    ///   1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.
-    ///
-    ///   2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
-    ///
-    ///      Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
-    ///
-    ///      The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
-    ///
-    ///   3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.
-    ///
-    ///   4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
-    ///
-    ///      • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.
-    ///
-    ///      • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.
-    ///
-    ///      • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "fytp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
-    ///
-    ///        The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
-    ///
-    ///      • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
-    ///
-    ///        After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
-    ///
-    ///        Data retrieved with this action is billable. See Pricing for details.
-    ///
-    ///      • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.
-    ///
-    ///        If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.
-    ///
-    ///        Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.
-    ///
-    /// The following restrictions apply to HLS sessions:
-    ///
-    ///   • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
-    ///
-    ///   • A Kinesis video stream can have a maximum of five active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.
-    ///
-    /// You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
-    ///
-    /// For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
+    /// Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.
+    ///
+    /// Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
+    ///
+    /// An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:
+    ///
+    ///   • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec id of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).
    ///
    ///   • Data retention must be greater than 0.
    ///
    ///   • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
    ///
    ///   • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
+    ///
+    /// The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:
+    ///
+    ///   1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.
+    ///
+    ///   2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
+    ///
+    ///      Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
+    ///
+    ///      The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
+    ///
+    ///   3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment, and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.
+    ///
+    ///   4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
+    ///
+    ///      • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.
+    ///
+    ///      • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "ftyp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
+    ///
+    ///        The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
+    ///
+    ///      • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
+    ///
+    ///        After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
+    ///
+    ///        Data retrieved with this action is billable. See Pricing for details.
+    ///
+    /// The following restrictions apply to MPEG-DASH sessions:
+    ///
+    ///   • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
+    ///
+    ///   • A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia connection limit.
+    ///
+    ///     The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.
+    ///
+    /// You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
+    ///
+    /// For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
+    ///
+    /// If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
+    ///
+    ///   • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
+    ///
+    ///   • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
+    ///
+    /// Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
+    ///
+    /// For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
+    fn get_dash_streaming_session_url(
+        &self,
+        input: GetDASHStreamingSessionURLInput,
+    ) -> RusotoFuture<GetDASHStreamingSessionURLOutput, GetDASHStreamingSessionURLError>;
+
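A sketch of invoking the new trait method through the service client, following the crate's usual `.sync()` pattern for blocking calls (the stream name is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_kinesis_video_archived_media::{
    GetDASHStreamingSessionURLInput, KinesisVideoArchivedMedia,
    KinesisVideoArchivedMediaClient,
};

fn fetch_manifest_url() -> Option<String> {
    // Note: per the docs above, real calls must target the endpoint returned by
    // GetDataEndpoint for GET_DASH_STREAMING_SESSION_URL, not the default
    // regional endpoint used here for brevity.
    let client = KinesisVideoArchivedMediaClient::new(Region::UsWest2);
    let input = GetDASHStreamingSessionURLInput {
        stream_name: Some("my-stream".to_string()), // placeholder
        ..Default::default()
    };
    client
        .get_dash_streaming_session_url(input)
        .sync()
        .ok()
        .and_then(|out| out.dash_streaming_session_url)
}
```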

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec id of track 1 should be VMPEG/ISO/AVC (for h.264) or VMPEG/ISO/HEVC (for h.265). Optionally, the codec id of track 2 should be AAAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "ftyp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
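The two-step procedure above (fetch an endpoint, then the session URL) might look like the following sketch; the `rusoto_kinesis_video` crate, the `GetDataEndpointInput` shape, and the output field names are assumptions, not part of this diff:

```rust
use rusoto_core::Region;
use rusoto_kinesis_video::{GetDataEndpointInput, KinesisVideo, KinesisVideoClient};
use rusoto_kinesis_video_archived_media::{
    GetHLSStreamingSessionURLInput, KinesisVideoArchivedMedia, KinesisVideoArchivedMediaClient,
};

fn hls_url(stream: &str) -> Result<Option<String>, Box<dyn std::error::Error>> {
    // Step 1: ask the control plane which endpoint serves HLS sessions.
    let kv = KinesisVideoClient::new(Region::UsWest2);
    let endpoint = kv
        .get_data_endpoint(GetDataEndpointInput {
            api_name: "GET_HLS_STREAMING_SESSION_URL".to_string(),
            stream_name: Some(stream.to_string()),
            ..Default::default()
        })
        .sync()?
        .data_endpoint
        .ok_or("no data endpoint returned")?;

    // Step 2: point the archived-media client at that endpoint.
    let client = KinesisVideoArchivedMediaClient::new(Region::Custom {
        name: "us-west-2".to_string(),
        endpoint,
    });
    let out = client
        .get_hls_streaming_session_url(GetHLSStreamingSessionURLInput {
            stream_name: Some(stream.to_string()),
            ..Default::default()
        })
        .sync()?;
    Ok(out.hls_streaming_session_url)
}
```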

    fn get_hls_streaming_session_url(
        &self,
        input: GetHLSStreamingSessionURLInput,
    ) -> RusotoFuture<GetHLSStreamingSessionURLOutput, GetHLSStreamingSessionURLError>;

-    ///

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

+ ///

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
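As with the other archived-media calls, a hedged sketch; the fragment number is a placeholder and the field names are assumed from the generated input struct:

```rust
use rusoto_core::Region;
use rusoto_kinesis_video_archived_media::{
    GetMediaForFragmentListInput, KinesisVideoArchivedMedia, KinesisVideoArchivedMediaClient,
};

fn main() {
    // Assumption: this client targets the endpoint from GetDataEndpoint,
    // as the usage notes above require.
    let client = KinesisVideoArchivedMediaClient::new(Region::UsWest2);
    let input = GetMediaForFragmentListInput {
        stream_name: "my-stream".to_string(),
        // Placeholder fragment number, e.g. one returned by ListFragments.
        fragments: vec!["91343852333181432392682062622608425770312453539".to_string()],
    };
    match client.get_media_for_fragment_list(input).sync() {
        // The payload field (not shown) streams the MKV-wrapped fragments.
        Ok(out) => println!("content type: {:?}", out.content_type),
        Err(e) => eprintln!("GetMediaForFragmentList failed: {}", e),
    }
}
```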

    fn get_media_for_fragment_list(
        &self,
        input: GetMediaForFragmentListInput,
    ) -> RusotoFuture<GetMediaForFragmentListOutput, GetMediaForFragmentListError>;

-    ///

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

+ ///

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
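For completeness, a sketch of listing fragments over a server-timestamp range; the `FragmentSelector` and `TimestampRange` shapes are assumed from the generated types, and the epoch values are placeholders:

```rust
use rusoto_core::Region;
use rusoto_kinesis_video_archived_media::{
    FragmentSelector, KinesisVideoArchivedMedia, KinesisVideoArchivedMediaClient,
    ListFragmentsInput, TimestampRange,
};

fn main() {
    // Assumption: the client already points at the GetDataEndpoint result.
    let client = KinesisVideoArchivedMediaClient::new(Region::UsWest2);
    let input = ListFragmentsInput {
        stream_name: "my-stream".to_string(),
        fragment_selector: Some(FragmentSelector {
            fragment_selector_type: "SERVER_TIMESTAMP".to_string(),
            // Timestamps are seconds since the Unix epoch in this API.
            timestamp_range: TimestampRange {
                start_timestamp: 1_566_000_000.0,
                end_timestamp: 1_566_003_600.0,
            },
        }),
        max_results: Some(100),
        ..Default::default()
    };
    match client.list_fragments(input).sync() {
        Ok(out) => println!("fragments: {}", out.fragments.unwrap_or_default().len()),
        Err(e) => eprintln!("ListFragments failed: {}", e),
    }
}
```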

    fn list_fragments(
        &self,
        input: ListFragmentsInput,
@@ -430,10 +599,7 @@ impl KinesisVideoArchivedMediaClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> KinesisVideoArchivedMediaClient {
-        KinesisVideoArchivedMediaClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with<P, D>(
@@ -447,15 +613,51 @@ impl KinesisVideoArchivedMediaClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        KinesisVideoArchivedMediaClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(
+        client: Client,
+        region: region::Region,
+    ) -> KinesisVideoArchivedMediaClient {
+        KinesisVideoArchivedMediaClient { client, region }
+    }
}

impl KinesisVideoArchivedMedia for KinesisVideoArchivedMediaClient {
-    ///

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

You must specify either the StreamName or the StreamARN.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media must contain h.264 encoded video and, optionally, AAC encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC. Optionally, the codec id of track 2 should be A_AAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "ftyp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of five active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

+ ///

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec id of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG-DASH manifest, which contains the metadata for the media that you want to play back.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "ftyp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

The following restrictions apply to MPEG-DASH sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both MPEG-DASH sessions and outgoing AWS data apply.

For more information about MPEG-DASH, see the MPEG-DASH specification (ISO/IEC 23009-1).

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
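Since the status code and ErrorType header drive retry decisions, a caller might branch on the error shape as in this sketch; the `ClientLimitExceeded` variant name is an assumption based on the service's documented throttling error:

```rust
use rusoto_core::RusotoError;
use rusoto_kinesis_video_archived_media::GetDASHStreamingSessionURLError;

fn is_retryable(err: &RusotoError<GetDASHStreamingSessionURLError>) -> bool {
    match err {
        // Throttling surfaces as a typed service error (assumed variant name).
        RusotoError::Service(GetDASHStreamingSessionURLError::ClientLimitExceeded(_)) => true,
        // Unmodelled responses keep the raw HTTP status for inspection.
        RusotoError::Unknown(resp) => resp.status.is_server_error(),
        _ => false,
    }
}
```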

+    fn get_dash_streaming_session_url(
+        &self,
+        input: GetDASHStreamingSessionURLInput,
+    ) -> RusotoFuture<GetDASHStreamingSessionURLOutput, GetDASHStreamingSessionURLError> {
+        let request_uri = "/getDASHStreamingSessionURL";
+
+        let mut request = SignedRequest::new("POST", "kinesisvideo", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetDASHStreamingSessionURLOutput, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(GetDASHStreamingSessionURLError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEG/ISO/HEVC (for h.265). Optionally, the codec id of track 2 should be A_AAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the "ftyp" and "moov" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

    fn get_hls_streaming_session_url(
        &self,
        input: GetHLSStreamingSessionURLInput,
@@ -484,7 +686,7 @@ impl KinesisVideoArchivedMedia for KinesisVideoArchivedMediaClient {
        })
    }

-    ///

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

+ ///

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

    fn get_media_for_fragment_list(
        &self,
        input: GetMediaForFragmentListInput,
@@ -518,7 +720,7 @@ impl KinesisVideoArchivedMedia for KinesisVideoArchivedMediaClient {
        })
    }

-    ///

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

+ ///

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

    fn list_fragments(
        &self,
        input: ListFragmentsInput,
diff --git a/rusoto/services/kinesis-video-media/Cargo.toml b/rusoto/services/kinesis-video-media/Cargo.toml
index 8c25115c165..84c8e6a9459 100644
--- a/rusoto/services/kinesis-video-media/Cargo.toml
+++ b/rusoto/services/kinesis-video-media/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
name = "rusoto_kinesis_video_media"
readme = "README.md"
repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
homepage = "https://www.rusoto.org/"
edition = "2018"
exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
serde_json = "1.0.1"

[dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
path = "../../core"
default-features = false

[dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
path = "../../../mock"
+default-features = false
[features]
default = ["native-tls"]
native-tls = ["rusoto_core/native-tls"]
rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/kinesis-video-media/README.md b/rusoto/services/kinesis-video-media/README.md
index 12ee9d0e997..ec28076439a 100644
--- a/rusoto/services/kinesis-video-media/README.md
+++ b/rusoto/services/kinesis-video-media/README.md
@@ -23,9 +23,16 @@ To use `rusoto_kinesis_video_media` in your application, add it as a dependency
```toml
[dependencies]
-rusoto_kinesis_video_media = "0.40.0"
+rusoto_kinesis_video_media = "0.41.0"
```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
## Contributing

See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kinesis-video-media/src/custom/mod.rs b/rusoto/services/kinesis-video-media/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kinesis-video-media/src/custom/mod.rs
+++ b/rusoto/services/kinesis-video-media/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kinesis-video-media/src/generated.rs b/rusoto/services/kinesis-video-media/src/generated.rs
index 7d04936d0a7..1682c9f668b 100644
--- a/rusoto/services/kinesis-video-media/src/generated.rs
+++ b/rusoto/services/kinesis-video-media/src/generated.rs
@@ -9,17 +9,16 @@
// must be updated to generate the changes.
//
// =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
use futures::future;
use futures::Future;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

use rusoto_core::proto;
use rusoto_core::signature::SignedRequest;
@@ -58,7 +57,7 @@ pub struct StartSelector {
    #[serde(rename = "ContinuationToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
-    ///

Identifies the fragment on the Kinesis video stream where you want to start getting the data from.

  • NOW - Start with the latest chunk on the stream.

  • EARLIEST - Start with earliest available chunk on the stream.

  • FRAGMENT_NUMBER - Start with the chunk containing the specific fragment. You must also specify the StartFragmentNumber.

  • PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server timestamp. You specify the timestamp by adding StartTimestamp.

  • CONTINUATION_TOKEN - Read using the specified continuation token.

If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType, you don't provide any additional information in the startSelector.

+ ///

Identifies the fragment on the Kinesis video stream where you want to start getting the data from.

  • NOW - Start with the latest chunk on the stream.

  • EARLIEST - Start with earliest available chunk on the stream.

  • FRAGMENT_NUMBER - Start with the chunk after a specific fragment. You must also specify the AfterFragmentNumber parameter.

  • PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server timestamp. You specify the timestamp by adding StartTimestamp.

  • CONTINUATION_TOKEN - Read using the specified continuation token.

If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType, you don't provide any additional information in the startSelector.

#[serde(rename = "StartSelectorType")] pub start_selector_type: String, ///

A timestamp value. This value is required if you choose the PRODUCER_TIMESTAMP or the SERVER_TIMESTAMP as the startSelectorType. The GetMedia API then starts with the chunk containing the fragment that has the specified timestamp.
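A short sketch of the struct in use, assuming it derives `Default` like the other generated inputs:

```rust
use rusoto_kinesis_video_media::StartSelector;

// NOW, EARLIEST, and CONTINUATION_TOKEN need no extra fields; the timestamp
// variants would also set `start_timestamp`, and FRAGMENT_NUMBER would set
// the fragment-number field.
fn start_from_now() -> StartSelector {
    StartSelector {
        start_selector_type: "NOW".to_string(),
        ..Default::default()
    }
}
```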

@@ -132,7 +131,7 @@ impl Error for GetMediaError {
}
/// Trait representing the capabilities of the Kinesis Video Media API. Kinesis Video Media clients implement this trait.
pub trait KinesisVideoMedia {
-    ///

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify the stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMedia requests to this endpoint using the --endpoint-url parameter.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk." For more information, see . The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

+ ///

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify the stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMedia requests to this endpoint using the --endpoint-url parameter.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk." For more information, see PutMedia. The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

If an error is thrown after invoking a Kinesis Video Streams media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
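Putting the pieces together, a hedged sketch of a GetMedia call; the field names are assumed from the generated shapes, and the client should target the GetDataEndpoint result:

```rust
use rusoto_core::Region;
use rusoto_kinesis_video_media::{
    GetMediaInput, KinesisVideoMedia, KinesisVideoMediaClient, StartSelector,
};

fn main() {
    // Assumption: the client targets the endpoint from GetDataEndpoint.
    let client = KinesisVideoMediaClient::new(Region::UsWest2);
    let input = GetMediaInput {
        stream_name: Some("my-stream".to_string()),
        start_selector: StartSelector {
            start_selector_type: "EARLIEST".to_string(),
            ..Default::default()
        },
        ..Default::default()
    };
    match client.get_media(input).sync() {
        // The payload field (not shown) carries the chunk stream described above.
        Ok(out) => println!("content type: {:?}", out.content_type),
        Err(e) => eprintln!("GetMedia failed: {}", e),
    }
}
```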

    fn get_media(&self, input: GetMediaInput) -> RusotoFuture<GetMediaOutput, GetMediaError>;
}
/// A client for the Kinesis Video Media API.
@@ -147,10 +146,7 @@ impl KinesisVideoMediaClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> KinesisVideoMediaClient {
-        KinesisVideoMediaClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with<P, D>(
@@ -164,15 +160,19 @@ impl KinesisVideoMediaClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        KinesisVideoMediaClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> KinesisVideoMediaClient {
+        KinesisVideoMediaClient { client, region }
    }
}

impl KinesisVideoMedia for KinesisVideoMediaClient {
-    ///

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify the stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMedia requests to this endpoint using the --endpoint-url parameter.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk." For more information, see . The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

+ ///

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify the stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMedia requests to this endpoint using the --endpoint-url parameter.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk." For more information, see PutMedia. The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

If an error is thrown after invoking a Kinesis Video Streams media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

    fn get_media(&self, input: GetMediaInput) -> RusotoFuture<GetMediaOutput, GetMediaError> {
        let request_uri = "/getMedia";
diff --git a/rusoto/services/kinesis/Cargo.toml b/rusoto/services/kinesis/Cargo.toml
index 1cdaadbdee3..35dce114f79 100644
--- a/rusoto/services/kinesis/Cargo.toml
+++ b/rusoto/services/kinesis/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
name = "rusoto_kinesis"
readme = "README.md"
repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
homepage = "https://www.rusoto.org/"
edition = "2018"
exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
serde_json = "1.0.1"

[dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
path = "../../core"
default-features = false

[dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
path = "../../../mock"
+default-features = false
[features]
default = ["native-tls"]
native-tls = ["rusoto_core/native-tls"]
rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/kinesis/README.md b/rusoto/services/kinesis/README.md
index cede18c1c5e..910bb5e5126 100644
--- a/rusoto/services/kinesis/README.md
+++ b/rusoto/services/kinesis/README.md
@@ -23,9 +23,16 @@ To use `rusoto_kinesis` in your application, add it as a dependency in your `Car
```toml
[dependencies]
-rusoto_kinesis = "0.40.0"
+rusoto_kinesis = "0.41.0"
```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
## Contributing

See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kinesis/src/custom/mod.rs b/rusoto/services/kinesis/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kinesis/src/custom/mod.rs
+++ b/rusoto/services/kinesis/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kinesis/src/generated.rs b/rusoto/services/kinesis/src/generated.rs
index 47290d6119f..a0235e2ce87 100644
--- a/rusoto/services/kinesis/src/generated.rs
+++ b/rusoto/services/kinesis/src/generated.rs
@@ -9,17 +9,16 @@
// must be updated to generate the changes.
//
// =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
use futures::future;
use futures::Future;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

use rusoto_core::proto;
use rusoto_core::signature::SignedRequest;
@@ -37,7 +36,7 @@ pub struct AddTagsToStreamInput {
    ///

An object that represents the details of the consumer you registered.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Consumer {
    ///

When you register a consumer, Kinesis Data Streams generates an ARN for it. You need this ARN to be able to call SubscribeToShard.

If you delete a consumer and then create a new one with the same name, it won't have the same ARN. That's because consumer ARNs contain the creation timestamp. This is important to keep in mind if you have IAM policies that reference consumer ARNs.

#[serde(rename = "ConsumerARN")] @@ -55,7 +54,7 @@ pub struct Consumer { ///

An object that represents the details of a registered consumer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ConsumerDescription {
    ///

When you register a consumer, Kinesis Data Streams generates an ARN for it. You need this ARN to be able to call SubscribeToShard.

If you delete a consumer and then create a new one with the same name, it won't have the same ARN. That's because consumer ARNs contain the creation timestamp. This is important to keep in mind if you have IAM policies that reference consumer ARNs.

#[serde(rename = "ConsumerARN")] @@ -128,7 +127,7 @@ pub struct DeregisterStreamConsumerInput { pub struct DescribeLimitsInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLimitsOutput { ///

The number of open shards.

#[serde(rename = "OpenShardCount")] @@ -155,7 +154,7 @@ pub struct DescribeStreamConsumerInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamConsumerOutput { ///

An object that represents the details of the consumer.

#[serde(rename = "ConsumerDescription")] @@ -180,7 +179,7 @@ pub struct DescribeStreamInput { ///

Represents the output for DescribeStream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeStreamOutput {
    ///

The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.

#[serde(rename = "StreamDescription")] @@ -195,7 +194,7 @@ pub struct DescribeStreamSummaryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamSummaryOutput { ///

A StreamDescriptionSummary containing information about the stream.
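A sketch of fetching that summary with the generated client; the field names follow the structs in this diff, and the blocking `sync()` adapter is assumed from this release line:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{DescribeStreamSummaryInput, Kinesis, KinesisClient};

fn main() {
    let client = KinesisClient::new(Region::UsEast1);
    let input = DescribeStreamSummaryInput {
        stream_name: "my-stream".to_string(),
    };
    match client.describe_stream_summary(input).sync() {
        Ok(out) => println!("{:?}", out.stream_description_summary),
        Err(e) => eprintln!("DescribeStreamSummary failed: {}", e),
    }
}
```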

#[serde(rename = "StreamDescriptionSummary")] @@ -226,7 +225,7 @@ pub struct EnableEnhancedMonitoringInput { ///

Represents enhanced metrics types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct EnhancedMetrics {
    ///

List of shard-level metrics.

The following are the valid shard-level metrics. The value "ALL" enhances every metric.

  • IncomingBytes

  • IncomingRecords

  • OutgoingBytes

  • OutgoingRecords

  • WriteProvisionedThroughputExceeded

  • ReadProvisionedThroughputExceeded

  • IteratorAgeMilliseconds

  • ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
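A sketch of turning two of those metrics on; the `EnableEnhancedMonitoringInput` field names are assumed from the generated shape:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{EnableEnhancedMonitoringInput, Kinesis, KinesisClient};

fn main() {
    let client = KinesisClient::new(Region::UsEast1);
    let input = EnableEnhancedMonitoringInput {
        stream_name: "my-stream".to_string(),
        // "ALL" would enable every shard-level metric instead.
        shard_level_metrics: vec![
            "IncomingBytes".to_string(),
            "IteratorAgeMilliseconds".to_string(),
        ],
    };
    match client.enable_enhanced_monitoring(input).sync() {
        Ok(out) => println!("desired metrics: {:?}", out.desired_shard_level_metrics),
        Err(e) => eprintln!("EnableEnhancedMonitoring failed: {}", e),
    }
}
```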

#[serde(rename = "ShardLevelMetrics")] @@ -236,7 +235,7 @@ pub struct EnhancedMetrics { ///

Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnhancedMonitoringOutput { ///

Represents the current state of the metrics that are in the enhanced state before the operation.

#[serde(rename = "CurrentShardLevelMetrics")] @@ -279,7 +278,7 @@ pub struct GetRecordsInput { ///

Represents the output for GetRecords.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRecordsOutput { ///

The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.
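
A minimal sketch of reading that lag from a GetRecords call, assuming the 0.41 `sync()` API; the iterator value is a placeholder obtained from GetShardIterator:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{GetRecordsInput, Kinesis, KinesisClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KinesisClient::new(Region::UsEast1);
    let input = GetRecordsInput {
        shard_iterator: "<iterator from GetShardIterator>".to_string(),
        limit: Some(100),
    };
    let output = client.get_records(input).sync()?;
    // Zero means the consumer has caught up with the tip of the shard.
    if let Some(lag_ms) = output.millis_behind_latest {
        println!("consumer is {} ms behind the tip", lag_ms);
    }
    println!("fetched {} records", output.records.len());
    Ok(())
}
```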

#[serde(rename = "MillisBehindLatest")] @@ -318,7 +317,7 @@ pub struct GetShardIteratorInput { ///

Represents the output for GetShardIterator.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetShardIteratorOutput { ///

The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.
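
For example, a sketch of requesting an iterator positioned at a known sequence number; the stream, shard, and sequence number are placeholders, and `TRIM_HORIZON` or `LATEST` would need no sequence number at all:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{GetShardIteratorInput, Kinesis, KinesisClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KinesisClient::new(Region::UsEast1);
    let input = GetShardIteratorInput {
        stream_name: "my-stream".to_string(),
        shard_id: "shardId-000000000000".to_string(),
        shard_iterator_type: "AT_SEQUENCE_NUMBER".to_string(),
        starting_sequence_number: Some("<sequence number of a stored record>".to_string()),
        timestamp: None, // only used with AT_TIMESTAMP
    };
    let output = client.get_shard_iterator(input).sync()?;
    println!("iterator: {:?}", output.shard_iterator);
    Ok(())
}
```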

#[serde(rename = "ShardIterator")] @@ -328,7 +327,7 @@ pub struct GetShardIteratorOutput { ///

The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HashKeyRange { ///

The ending hash key of the hash key range.

#[serde(rename = "EndingHashKey")] @@ -350,7 +349,7 @@ pub struct IncreaseStreamRetentionPeriodInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InternalFailureException { #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] @@ -366,7 +365,7 @@ pub struct InvalidArgumentException { ///

The ciphertext references a key that doesn't exist or that you don't have access to.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSAccessDeniedException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -376,7 +375,7 @@ pub struct KMSAccessDeniedException { ///

The request was rejected because the specified customer master key (CMK) isn't enabled.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSDisabledException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -386,7 +385,7 @@ pub struct KMSDisabledException { ///

The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSInvalidStateException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -396,7 +395,7 @@ pub struct KMSInvalidStateException { ///

The request was rejected because the specified entity or resource can't be found.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSNotFoundException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -406,7 +405,7 @@ pub struct KMSNotFoundException { ///

The AWS access key ID needs a subscription for the service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSOptInRequired { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -416,7 +415,7 @@ pub struct KMSOptInRequired { ///

The request was denied due to request throttling. For more information about throttling, see Limits in the AWS Key Management Service Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KMSThrottlingException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -456,7 +455,7 @@ pub struct ListShardsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListShardsOutput { ///

When the number of shards in the data stream is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of shards in the data stream, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListShards to list the next set of shards. For more information about the use of this pagination token when calling the ListShards operation, see ListShardsInput$NextToken.

Tokens expire after 300 seconds. When you obtain a value for NextToken in the response to a call to ListShards, you have 300 seconds to use that value. If you specify an expired token in a call to ListShards, you get ExpiredNextTokenException.
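
A hedged pagination sketch that follows NextToken until the shard list is exhausted, assuming the 0.41 `sync()` API and a placeholder stream name:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{Kinesis, KinesisClient, ListShardsInput};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KinesisClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    let mut shard_count = 0;
    loop {
        let input = ListShardsInput {
            // A request that carries NextToken must not also name the stream.
            stream_name: if next_token.is_none() { Some("my-stream".to_string()) } else { None },
            next_token: next_token.take(),
            max_results: Some(100),
            ..Default::default()
        };
        let output = client.list_shards(input).sync()?;
        shard_count += output.shards.map_or(0, |shards| shards.len());
        match output.next_token {
            // The token is only valid for 300 seconds, so follow it promptly.
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
    println!("{} shards", shard_count);
    Ok(())
}
```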

#[serde(rename = "NextToken")] @@ -488,7 +487,7 @@ pub struct ListStreamConsumersInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamConsumersOutput { ///

An array of JSON objects. Each object represents one registered consumer.

#[serde(rename = "Consumers")] @@ -515,7 +514,7 @@ pub struct ListStreamsInput { ///

Represents the output for ListStreams.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamsOutput { ///

If set to true, there are more streams available to list.

#[serde(rename = "HasMoreStreams")] @@ -543,7 +542,7 @@ pub struct ListTagsForStreamInput { ///

Represents the output for ListTagsForStream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForStreamOutput { ///

If set to true, more tags are available. To request additional tags, set ExclusiveStartTagKey to the key of the last tag returned.

#[serde(rename = "HasMoreTags")] @@ -603,7 +602,7 @@ pub struct PutRecordInput { ///

Represents the output for PutRecord.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordOutput { ///

The encryption type to use on the record. This parameter can be one of the following values:

  • NONE: Do not encrypt the records in the stream.

  • KMS: Use server-side encryption on the records in the stream using a customer-managed AWS KMS key.

#[serde(rename = "EncryptionType")] @@ -630,7 +629,7 @@ pub struct PutRecordsInput { ///

PutRecords results.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordsOutput { ///

The encryption type used on the records. This parameter can be one of the following values:

  • NONE: Do not encrypt the records.

  • KMS: Use server-side encryption on the records using a customer-managed AWS KMS key.

#[serde(rename = "EncryptionType")] @@ -667,7 +666,7 @@ pub struct PutRecordsRequestEntry { ///

Represents the result of an individual record from a PutRecords request. A record that is successfully added to a stream includes SequenceNumber and ShardId in the result. A record that fails to be added to the stream includes ErrorCode and ErrorMessage in the result.
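
A minimal sketch of inspecting the per-record results after PutRecords and flagging entries that need a retry, assuming the 0.41 `sync()` API (record payloads are `bytes::Bytes` in this release):

```rust
use rusoto_core::Region;
use rusoto_kinesis::{Kinesis, KinesisClient, PutRecordsInput, PutRecordsRequestEntry};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KinesisClient::new(Region::UsEast1);
    let entries: Vec<PutRecordsRequestEntry> = (0..10)
        .map(|i| PutRecordsRequestEntry {
            data: format!("payload-{}", i).into_bytes().into(),
            partition_key: format!("key-{}", i),
            explicit_hash_key: None,
        })
        .collect();
    let output = client
        .put_records(PutRecordsInput {
            records: entries,
            stream_name: "my-stream".to_string(),
        })
        .sync()?;
    // Entries carrying an ErrorCode failed and should be retried; successful
    // entries carry SequenceNumber and ShardId instead.
    for (i, entry) in output.records.iter().enumerate() {
        if let Some(code) = &entry.error_code {
            eprintln!("record {} failed: {}", i, code);
        }
    }
    Ok(())
}
```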

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutRecordsResultEntry { ///

The error code for an individual record result. ErrorCodes can be either ProvisionedThroughputExceededException or InternalFailure.

#[serde(rename = "ErrorCode")] @@ -689,7 +688,7 @@ pub struct PutRecordsResultEntry { ///

The unit of data of the Kinesis data stream, which is composed of a sequence number, a partition key, and a data blob.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Record { ///

The approximate time that the record was inserted into the stream.

#[serde(rename = "ApproximateArrivalTimestamp")] @@ -726,7 +725,7 @@ pub struct RegisterStreamConsumerInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterStreamConsumerOutput { ///

An object that represents the details of the consumer you registered. When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.

#[serde(rename = "Consumer")] @@ -746,7 +745,7 @@ pub struct RemoveTagsFromStreamInput { ///

The resource is not available for this operation. For successful operation, the resource must be in the ACTIVE state.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceInUseException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -756,7 +755,7 @@ pub struct ResourceInUseException { ///

The requested resource could not be found. The stream might not be specified correctly.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceNotFoundException { ///

A message that provides information about the error.

#[serde(rename = "message")] @@ -766,7 +765,7 @@ pub struct ResourceNotFoundException { ///

The range of possible sequence numbers for the shard.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SequenceNumberRange { ///

The ending sequence number for the range. Shards that are in the OPEN state have an ending sequence number of null.
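
For example, a sketch that classifies shards as open or closed from this field, assuming the 0.41 `sync()` API and a placeholder stream name:

```rust
use rusoto_core::Region;
use rusoto_kinesis::{DescribeStreamInput, Kinesis, KinesisClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KinesisClient::new(Region::UsEast1);
    let input = DescribeStreamInput {
        stream_name: "my-stream".to_string(),
        ..Default::default()
    };
    let output = client.describe_stream(input).sync()?;
    for shard in output.stream_description.shards {
        // An absent EndingSequenceNumber marks a shard that is still OPEN.
        let state = if shard.sequence_number_range.ending_sequence_number.is_none() {
            "open"
        } else {
            "closed"
        };
        println!("{} is {}", shard.shard_id, state);
    }
    Ok(())
}
```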

#[serde(rename = "EndingSequenceNumber")] @@ -779,7 +778,7 @@ pub struct SequenceNumberRange { ///

A uniquely identified group of data records in a Kinesis data stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Shard { ///

The shard ID of the shard adjacent to the shard's parent.

#[serde(rename = "AdjacentParentShardId")] @@ -854,7 +853,7 @@ pub struct StopStreamEncryptionInput { ///

Represents the output for DescribeStream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamDescription { ///

The server-side encryption type used on the stream. This parameter can be one of the following values:

  • NONE: Do not encrypt the records in the stream.

  • KMS: Use server-side encryption on the records in the stream using a customer-managed AWS KMS key.

#[serde(rename = "EncryptionType")] @@ -892,7 +891,7 @@ pub struct StreamDescription { ///

Represents the output for DescribeStreamSummary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamDescriptionSummary { ///

The number of enhanced fan-out consumers registered with the stream.

#[serde(rename = "ConsumerCount")] @@ -931,7 +930,7 @@ pub struct StreamDescriptionSummary { ///

After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your consumer.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribeToShardEvent { ///

Use this as StartingSequenceNumber in the next call to SubscribeToShard.

#[serde(rename = "ContinuationSequenceNumber")] @@ -945,7 +944,7 @@ pub struct SubscribeToShardEvent { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribeToShardEventStream { #[serde(rename = "InternalFailureException")] #[serde(skip_serializing_if = "Option::is_none")] @@ -991,7 +990,7 @@ pub struct SubscribeToShardInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribeToShardOutput { ///

The event stream that your consumer can use to read records from the shard.

#[serde(rename = "EventStream")] @@ -1000,7 +999,7 @@ pub struct SubscribeToShardOutput { ///

Metadata assigned to the stream, consisting of a key-value pair.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Tag { ///

A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

#[serde(rename = "Key")] @@ -1025,7 +1024,7 @@ pub struct UpdateShardCountInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateShardCountOutput { ///

The current number of shards.

#[serde(rename = "CurrentShardCount")] @@ -2769,10 +2768,7 @@ impl KinesisClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> KinesisClient { - KinesisClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2786,10 +2782,14 @@ impl KinesisClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - KinesisClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> KinesisClient { + KinesisClient { client, region } } } diff --git a/rusoto/services/kinesisanalytics/Cargo.toml b/rusoto/services/kinesisanalytics/Cargo.toml index e2718a3d354..cb213c2610e 100644 --- a/rusoto/services/kinesisanalytics/Cargo.toml +++ b/rusoto/services/kinesisanalytics/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_kinesisanalytics" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/kinesisanalytics/README.md b/rusoto/services/kinesisanalytics/README.md index ffe34132a71..58b2d1853a8 100644 --- a/rusoto/services/kinesisanalytics/README.md +++ b/rusoto/services/kinesisanalytics/README.md @@ -23,9 +23,16 @@ To use `rusoto_kinesisanalytics` in your application, add it as a dependency in ```toml [dependencies] -rusoto_kinesisanalytics = "0.40.0" +rusoto_kinesisanalytics = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/kinesisanalytics/src/custom/mod.rs b/rusoto/services/kinesisanalytics/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/kinesisanalytics/src/custom/mod.rs +++ b/rusoto/services/kinesisanalytics/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/kinesisanalytics/src/generated.rs b/rusoto/services/kinesisanalytics/src/generated.rs index d223242c126..e5074d1c978 100644 --- a/rusoto/services/kinesisanalytics/src/generated.rs +++ b/rusoto/services/kinesisanalytics/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
diff --git a/rusoto/services/kinesisanalytics/Cargo.toml b/rusoto/services/kinesisanalytics/Cargo.toml
index e2718a3d354..cb213c2610e 100644
--- a/rusoto/services/kinesisanalytics/Cargo.toml
+++ b/rusoto/services/kinesisanalytics/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_kinesisanalytics"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/kinesisanalytics/README.md b/rusoto/services/kinesisanalytics/README.md
index ffe34132a71..58b2d1853a8 100644
--- a/rusoto/services/kinesisanalytics/README.md
+++ b/rusoto/services/kinesisanalytics/README.md
@@ -23,9 +23,16 @@ To use `rusoto_kinesisanalytics` in your application, add it as a dependency in
 ```toml
 [dependencies]
-rusoto_kinesisanalytics = "0.40.0"
+rusoto_kinesisanalytics = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kinesisanalytics/src/custom/mod.rs b/rusoto/services/kinesisanalytics/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kinesisanalytics/src/custom/mod.rs
+++ b/rusoto/services/kinesisanalytics/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kinesisanalytics/src/generated.rs b/rusoto/services/kinesisanalytics/src/generated.rs
index d223242c126..e5074d1c978 100644
--- a/rusoto/services/kinesisanalytics/src/generated.rs
+++ b/rusoto/services/kinesisanalytics/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -38,7 +37,7 @@ pub struct AddApplicationCloudWatchLoggingOptionRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddApplicationCloudWatchLoggingOptionResponse {}
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -58,7 +57,7 @@ pub struct AddApplicationInputProcessingConfigurationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddApplicationInputProcessingConfigurationResponse {}
 ///

@@ -77,7 +76,7 @@ pub struct AddApplicationInputRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddApplicationInputResponse {} ///

@@ -96,7 +95,7 @@ pub struct AddApplicationOutputRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddApplicationOutputResponse {} ///

@@ -115,12 +114,12 @@ pub struct AddApplicationReferenceDataSourceRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddApplicationReferenceDataSourceResponse {} ///

This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see Amazon Kinesis Data Analytics API V2 Documentation.

Provides a description of the application, including the application Amazon Resource Name (ARN), status, latest version, and input and output configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplicationDetail { ///

ARN of the application.

#[serde(rename = "ApplicationARN")] @@ -170,7 +169,7 @@ pub struct ApplicationDetail { ///

This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see Amazon Kinesis Data Analytics API V2 Documentation.

Provides application summary information, including the application Amazon Resource Name (ARN), name, and status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApplicationSummary { ///

ARN of the application.

#[serde(rename = "ApplicationARN")] @@ -232,7 +231,7 @@ pub struct CloudWatchLoggingOption { ///

Description of the CloudWatch logging option.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloudWatchLoggingOptionDescription { ///

ID of the CloudWatch logging option description.

#[serde(rename = "CloudWatchLoggingOptionId")] @@ -288,7 +287,7 @@ pub struct CreateApplicationRequest { #[serde(rename = "Outputs")] #[serde(skip_serializing_if = "Option::is_none")] pub outputs: Option>, - ///

A list of one or more tags to assign to the application. A tag is a key-value pair that identifies an application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management Guide.

+ ///

A list of one or more tags to assign to the application. A tag is a key-value pair that identifies an application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -296,7 +295,7 @@ pub struct CreateApplicationRequest { ///

TBD

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateApplicationResponse { ///

In response to your CreateApplication request, Amazon Kinesis Analytics returns a response with a summary of the application it created, including the application Amazon Resource Name (ARN), name, and status.

#[serde(rename = "ApplicationSummary")] @@ -317,7 +316,7 @@ pub struct DeleteApplicationCloudWatchLoggingOptionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationCloudWatchLoggingOptionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -334,7 +333,7 @@ pub struct DeleteApplicationInputProcessingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationInputProcessingConfigurationResponse {} ///

@@ -353,7 +352,7 @@ pub struct DeleteApplicationOutputRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationOutputResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -370,7 +369,7 @@ pub struct DeleteApplicationReferenceDataSourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationReferenceDataSourceResponse {} ///

@@ -386,7 +385,7 @@ pub struct DeleteApplicationRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteApplicationResponse {} ///

@@ -399,7 +398,7 @@ pub struct DescribeApplicationRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeApplicationResponse { ///

Provides a description of the application, such as the application Amazon Resource Name (ARN), status, latest version, and input and output configuration details.

#[serde(rename = "ApplicationDetail")] @@ -440,7 +439,7 @@ pub struct DiscoverInputSchemaRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiscoverInputSchemaResponse { ///

Schema inferred from the streaming source. It identifies the format of the data in the streaming source and how each data element maps to corresponding columns in the in-application stream that you can create.

#[serde(rename = "InputSchema")] @@ -500,7 +499,7 @@ pub struct InputConfiguration { ///

Describes the application input configuration. For more information, see Configuring Application Input.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InputDescription { ///

Returns the in-application stream names that are mapped to the stream source.

#[serde(rename = "InAppStreamNames")] @@ -553,7 +552,7 @@ pub struct InputLambdaProcessor { ///

An object that contains the Amazon Resource Name (ARN) of the AWS Lambda function that is used to preprocess records in the stream, and the ARN of the IAM role that is used to access the AWS Lambda expression.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InputLambdaProcessorDescription { ///

The ARN of the AWS Lambda function that is used to preprocess the records in the stream.

#[serde(rename = "ResourceARN")] @@ -568,7 +567,7 @@ pub struct InputLambdaProcessorDescription { ///

Represents an update to the InputLambdaProcessor that is used to preprocess the records in the stream.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct InputLambdaProcessorUpdate { - ///

The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.

To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: AWS Lambda

+ ///

The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.

To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: AWS Lambda

#[serde(rename = "ResourceARNUpdate")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_arn_update: Option, @@ -606,7 +605,7 @@ pub struct InputProcessingConfiguration { ///

Provides configuration information about an input processor. Currently, the only input processor available is AWS Lambda.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InputProcessingConfigurationDescription { ///

Provides configuration information about the associated InputLambdaProcessorDescription.

#[serde(rename = "InputLambdaProcessorDescription")] @@ -701,7 +700,7 @@ pub struct KinesisFirehoseInput { ///

Describes the Amazon Kinesis Firehose delivery stream that is configured as the streaming source in the application input configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KinesisFirehoseInputDescription { ///

Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.

#[serde(rename = "ResourceARN")] @@ -739,7 +738,7 @@ pub struct KinesisFirehoseOutput { ///

For an application output, describes the Amazon Kinesis Firehose delivery stream configured as its destination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KinesisFirehoseOutputDescription { ///

Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.

#[serde(rename = "ResourceARN")] @@ -777,7 +776,7 @@ pub struct KinesisStreamsInput { ///

Describes the Amazon Kinesis stream that is configured as the streaming source in the application input configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KinesisStreamsInputDescription { ///

Amazon Resource Name (ARN) of the Amazon Kinesis stream.

#[serde(rename = "ResourceARN")] @@ -815,7 +814,7 @@ pub struct KinesisStreamsOutput { ///

For an application output, describes the Amazon Kinesis stream configured as its destination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KinesisStreamsOutputDescription { ///

Amazon Resource Name (ARN) of the Amazon Kinesis stream.

#[serde(rename = "ResourceARN")] @@ -853,7 +852,7 @@ pub struct LambdaOutput { ///

For an application output, describes the AWS Lambda function configured as its destination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaOutputDescription { ///

Amazon Resource Name (ARN) of the destination Lambda function.

#[serde(rename = "ResourceARN")] @@ -893,7 +892,7 @@ pub struct ListApplicationsRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListApplicationsResponse { ///

List of ApplicationSummary objects.

#[serde(rename = "ApplicationSummaries")] @@ -911,7 +910,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The key-value tags assigned to the application.

#[serde(rename = "Tags")] @@ -957,7 +956,7 @@ pub struct Output { ///

Describes the application output configuration, which includes the in-application stream name and the destination where the stream data is written. The destination can be an Amazon Kinesis stream or an Amazon Kinesis Firehose delivery stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutputDescription { ///

Data format used for writing data to the destination.

#[serde(rename = "DestinationSchema")] @@ -1057,7 +1056,7 @@ pub struct ReferenceDataSource { ///

Describes the reference data source configured for an application.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReferenceDataSourceDescription { ///

ID of the reference data source. This is the ID that Amazon Kinesis Analytics assigns when you add the reference data source to your application using the AddApplicationReferenceDataSource operation.

#[serde(rename = "ReferenceId")] @@ -1124,7 +1123,7 @@ pub struct S3ReferenceDataSource { ///

Provides the bucket name and object key name that stores the reference data.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct S3ReferenceDataSourceDescription { ///

Amazon Resource Name (ARN) of the S3 bucket.

#[serde(rename = "BucketARN")] @@ -1182,7 +1181,7 @@ pub struct StartApplicationRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartApplicationResponse {} ///

@@ -1195,10 +1194,10 @@ pub struct StopApplicationRequest { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopApplicationResponse {} -///

A key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management Guide.

+///

A key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { ///

The key of the key-value tag.

@@ -1221,7 +1220,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1235,7 +1234,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1252,7 +1251,7 @@ pub struct UpdateApplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateApplicationResponse {} /// Errors returned by AddApplicationCloudWatchLoggingOption @@ -2066,7 +2065,7 @@ impl Error for DescribeApplicationError { pub enum DiscoverInputSchemaError { ///

Specified input parameter value is invalid.

InvalidArgument(String), - ///

Discovery failed to get a record from the streaming source because of the Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, see GetRecords in the Amazon Kinesis Streams API Reference.

+ ///

Discovery failed to get a record from the streaming source because of the Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, see GetRecords in the Amazon Kinesis Streams API Reference.

ResourceProvisionedThroughputExceeded(String), ///

The service is unavailable. Back off and retry the operation.

ServiceUnavailable(String), @@ -2587,7 +2586,7 @@ pub trait KinesisAnalytics { input: ListApplicationsRequest, ) -> RusotoFuture; - ///

Retrieves the list of key-value tags assigned to the application.

+ ///

Retrieves the list of key-value tags assigned to the application. For more information, see Using Tagging.

fn list_tags_for_resource( &self, input: ListTagsForResourceRequest, @@ -2605,13 +2604,13 @@ pub trait KinesisAnalytics { input: StopApplicationRequest, ) -> RusotoFuture; - ///

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50.

+ ///

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

fn tag_resource( &self, input: TagResourceRequest, ) -> RusotoFuture; - ///

Removes one or more tags from a Kinesis Analytics application.

+ ///

Removes one or more tags from a Kinesis Analytics application. For more information, see Using Tagging.

    fn untag_resource(
        &self,
        input: UntagResourceRequest,
@@ -2635,10 +2634,7 @@ impl KinesisAnalyticsClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> KinesisAnalyticsClient {
-        KinesisAnalyticsClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -2652,10 +2648,14 @@ impl KinesisAnalyticsClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        KinesisAnalyticsClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> KinesisAnalyticsClient {
+        KinesisAnalyticsClient { client, region }
     }
 }
@@ -3103,7 +3103,7 @@ impl KinesisAnalytics for KinesisAnalyticsClient {
         })
     }
 
-    ///

Retrieves the list of key-value tags assigned to the application.

+ ///

Retrieves the list of key-value tags assigned to the application. For more information, see Using Tagging.

fn list_tags_for_resource( &self, input: ListTagsForResourceRequest, @@ -3192,7 +3192,7 @@ impl KinesisAnalytics for KinesisAnalyticsClient { }) } - ///

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50.

+ ///

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

fn tag_resource( &self, input: TagResourceRequest, @@ -3221,7 +3221,7 @@ impl KinesisAnalytics for KinesisAnalyticsClient { }) } - ///

Removes one or more tags from a Kinesis Analytics application.

+ ///

Removes one or more tags from a Kinesis Analytics application. For more information, see Using Tagging.

    fn untag_resource(
        &self,
        input: UntagResourceRequest,
diff --git a/rusoto/services/kinesisvideo/Cargo.toml b/rusoto/services/kinesisvideo/Cargo.toml
index bbc8ffb1bae..96ccf39b9e1 100644
--- a/rusoto/services/kinesisvideo/Cargo.toml
+++ b/rusoto/services/kinesisvideo/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_kinesisvideo"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/kinesisvideo/README.md b/rusoto/services/kinesisvideo/README.md
index 34f123b1102..a1625a018c2 100644
--- a/rusoto/services/kinesisvideo/README.md
+++ b/rusoto/services/kinesisvideo/README.md
@@ -23,9 +23,16 @@ To use `rusoto_kinesisvideo` in your application, add it as a dependency in your
 ```toml
 [dependencies]
-rusoto_kinesisvideo = "0.40.0"
+rusoto_kinesisvideo = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kinesisvideo/src/custom/mod.rs b/rusoto/services/kinesisvideo/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kinesisvideo/src/custom/mod.rs
+++ b/rusoto/services/kinesisvideo/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kinesisvideo/src/generated.rs b/rusoto/services/kinesisvideo/src/generated.rs
index f2e3d4c9fe1..cca32fcd6ce 100644
--- a/rusoto/services/kinesisvideo/src/generated.rs
+++ b/rusoto/services/kinesisvideo/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -52,7 +51,7 @@ pub struct CreateStreamInput {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateStreamOutput {
     ///

The Amazon Resource Name (ARN) of the stream.

#[serde(rename = "StreamARN")] @@ -72,7 +71,7 @@ pub struct DeleteStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteStreamOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -88,7 +87,7 @@ pub struct DescribeStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamOutput { ///

An object that describes the stream.

#[serde(rename = "StreamInfo")] @@ -112,7 +111,7 @@ pub struct GetDataEndpointInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataEndpointOutput { ///

The endpoint value. To read data from the stream or to write data to it, specify this endpoint in your application.

#[serde(rename = "DataEndpoint")] @@ -137,7 +136,7 @@ pub struct ListStreamsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamsOutput { ///

If the response is truncated, the call returns this element with a token. To get the next batch of streams, use this token in your next request.

#[serde(rename = "NextToken")] @@ -166,7 +165,7 @@ pub struct ListTagsForStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForStreamOutput { ///

If you specify this parameter and the result of a ListTags call is truncated, the response includes a token that you can use in the next request to fetch the next set of tags.

#[serde(rename = "NextToken")] @@ -180,7 +179,7 @@ pub struct ListTagsForStreamOutput { ///

An object describing a Kinesis video stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamInfo { ///

A time stamp that indicates when the stream was created.

#[serde(rename = "CreationTime")] @@ -249,7 +248,7 @@ pub struct TagStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagStreamOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -268,7 +267,7 @@ pub struct UntagStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagStreamOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -293,7 +292,7 @@ pub struct UpdateDataRetentionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDataRetentionOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -320,7 +319,7 @@ pub struct UpdateStreamInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateStreamOutput {} /// Errors returned by CreateStream @@ -980,10 +979,7 @@ impl KinesisVideoClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> KinesisVideoClient { - KinesisVideoClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -997,10 +993,14 @@ impl KinesisVideoClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - KinesisVideoClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> KinesisVideoClient { + KinesisVideoClient { client, region } } } diff --git a/rusoto/services/kms/Cargo.toml b/rusoto/services/kms/Cargo.toml index 6bbb87e1e8d..293483f70c2 100644 --- a/rusoto/services/kms/Cargo.toml +++ b/rusoto/services/kms/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_kms" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/kms/README.md b/rusoto/services/kms/README.md index 1cc74ebab6a..00cbd50dc4f 100644 --- a/rusoto/services/kms/README.md +++ b/rusoto/services/kms/README.md @@ -23,9 +23,16 @@ To use `rusoto_kms` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_kms = "0.40.0" +rusoto_kms = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. 
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/kms/src/custom/mod.rs b/rusoto/services/kms/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/kms/src/custom/mod.rs
+++ b/rusoto/services/kms/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/kms/src/generated.rs b/rusoto/services/kms/src/generated.rs
index f0bdf5db0c9..a418ee5b93c 100644
--- a/rusoto/services/kms/src/generated.rs
+++ b/rusoto/services/kms/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 ///

Contains information about an alias.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AliasListEntry { ///

String that contains the key ARN.

#[serde(rename = "AliasArn")] @@ -50,7 +49,7 @@ pub struct CancelKeyDeletionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelKeyDeletionResponse { ///

The unique identifier of the master key for which deletion is canceled.

#[serde(rename = "KeyId")] @@ -66,7 +65,7 @@ pub struct ConnectCustomKeyStoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConnectCustomKeyStoreResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -96,7 +95,7 @@ pub struct CreateCustomKeyStoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCustomKeyStoreResponse { ///

A unique identifier for the new custom key store.

#[serde(rename = "CustomKeyStoreId")] @@ -134,7 +133,7 @@ pub struct CreateGrantRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGrantResponse { ///

The unique identifier for the grant.

You can use the GrantId in a subsequent RetireGrant or RevokeGrant operation.

#[serde(rename = "GrantId")] @@ -179,7 +178,7 @@ pub struct CreateKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateKeyResponse { ///

Metadata associated with the CMK.

#[serde(rename = "KeyMetadata")] @@ -189,7 +188,7 @@ pub struct CreateKeyResponse { ///

Contains information about each custom key store in the custom key store list.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CustomKeyStoresListEntry { ///

A unique identifier for the AWS CloudHSM cluster that is associated with the custom key store.

#[serde(rename = "CloudHsmClusterId")] @@ -242,7 +241,7 @@ pub struct DecryptRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecryptResponse { ///

ARN of the key used to perform the decryption. This value is returned if no errors are encountered during the operation.

#[serde(rename = "KeyId")] @@ -274,7 +273,7 @@ pub struct DeleteCustomKeyStoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCustomKeyStoreResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -305,7 +304,7 @@ pub struct DescribeCustomKeyStoresRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCustomKeyStoresResponse { ///

Contains metadata about each custom key store.

#[serde(rename = "CustomKeyStores")] @@ -333,7 +332,7 @@ pub struct DescribeKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeKeyResponse { ///

Metadata associated with the key.

#[serde(rename = "KeyMetadata")] @@ -363,7 +362,7 @@ pub struct DisconnectCustomKeyStoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisconnectCustomKeyStoreResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -404,7 +403,7 @@ pub struct EncryptRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EncryptResponse { ///

The encrypted plaintext. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not encoded.

#[serde(rename = "CiphertextBlob")] @@ -445,7 +444,7 @@ pub struct GenerateDataKeyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenerateDataKeyResponse { ///

The encrypted copy of the data key. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not encoded.
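
A hedged envelope-encryption sketch around GenerateDataKey, assuming the 0.41 `sync()` API and a placeholder key alias; blob fields are `bytes::Bytes` in this release:

```rust
use rusoto_core::Region;
use rusoto_kms::{GenerateDataKeyRequest, Kms, KmsClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = KmsClient::new(Region::UsEast1);
    let request = GenerateDataKeyRequest {
        key_id: "alias/my-app-key".to_string(),
        key_spec: Some("AES_256".to_string()),
        ..Default::default()
    };
    let response = client.generate_data_key(request).sync()?;
    // Use `plaintext` to encrypt data locally and then discard it; persist
    // only `ciphertext_blob` next to the data and call Decrypt when needed.
    if let (Some(plaintext), Some(blob)) = (response.plaintext, response.ciphertext_blob) {
        println!("data key: {} bytes, wrapped: {} bytes", plaintext.len(), blob.len());
    }
    Ok(())
}
```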

#[serde(rename = "CiphertextBlob")] @@ -495,7 +494,7 @@ pub struct GenerateDataKeyWithoutPlaintextRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenerateDataKeyWithoutPlaintextResponse { ///

The encrypted data key. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not encoded.

#[serde(rename = "CiphertextBlob")] @@ -525,7 +524,7 @@ pub struct GenerateRandomRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenerateRandomResponse { ///

The random byte string. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not encoded.

#[serde(rename = "Plaintext")] @@ -549,7 +548,7 @@ pub struct GetKeyPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetKeyPolicyResponse { ///

A key policy document in JSON format.

#[serde(rename = "Policy")] @@ -565,7 +564,7 @@ pub struct GetKeyRotationStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetKeyRotationStatusResponse { ///

<p>A Boolean value that specifies whether key rotation is enabled.</p>
#[serde(rename = "KeyRotationEnabled")] @@ -587,7 +586,7 @@ pub struct GetParametersForImportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetParametersForImportResponse { ///

<p>The import token to send in a subsequent ImportKeyMaterial request.</p>
#[serde(rename = "ImportToken")] @@ -632,7 +631,7 @@ pub struct GrantConstraints { ///

<p>Contains information about an entry in a list of grants.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GrantListEntry { ///

<p>A list of key-value pairs that must be present in the encryption context of certain subsequent operations that the grant allows.</p>
#[serde(rename = "Constraints")] @@ -704,12 +703,12 @@ pub struct ImportKeyMaterialRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportKeyMaterialResponse {} ///

<p>Contains information about each entry in the key list.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyListEntry { ///

<p>ARN of the key.</p>
#[serde(rename = "KeyArn")] @@ -723,7 +722,7 @@ pub struct KeyListEntry { ///

<p>Contains metadata about a customer master key (CMK).</p> <p>This data type is used as a response element for the CreateKey and DescribeKey operations.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyMetadata { ///

<p>The twelve-digit account ID of the AWS account that owns the CMK.</p>
#[serde(rename = "AWSAccountId")] @@ -803,7 +802,7 @@ pub struct ListAliasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAliasesResponse { ///

<p>A list of aliases.</p>
#[serde(rename = "Aliases")] @@ -835,7 +834,7 @@ pub struct ListGrantsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGrantsResponse { ///

<p>A list of grants.</p>
#[serde(rename = "Grants")] @@ -867,7 +866,7 @@ pub struct ListKeyPoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListKeyPoliciesResponse { ///

<p>When Truncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent request.</p>
#[serde(rename = "NextMarker")] @@ -896,7 +895,7 @@ pub struct ListKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListKeysResponse { ///

<p>A list of customer master keys (CMKs).</p>
#[serde(rename = "Keys")] @@ -928,7 +927,7 @@ pub struct ListResourceTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceTagsResponse { ///

<p>When Truncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent request.</p> <p>Do not assume or infer any information from this value.</p>
#[serde(rename = "NextMarker")] @@ -1004,7 +1003,7 @@ pub struct ReEncryptRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReEncryptResponse { ///

<p>The reencrypted data. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not encoded.</p>
#[serde(rename = "CiphertextBlob")] @@ -1063,7 +1062,7 @@ pub struct ScheduleKeyDeletionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScheduleKeyDeletionResponse { ///

<p>The date and time after which AWS KMS deletes the customer master key (CMK).</p>
#[serde(rename = "DeletionDate")] @@ -1136,7 +1135,7 @@ pub struct UpdateCustomKeyStoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCustomKeyStoreResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -4047,10 +4046,7 @@ impl KmsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> KmsClient { - KmsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4064,10 +4060,14 @@ impl KmsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - KmsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> KmsClient { + KmsClient { client, region } } } diff --git a/rusoto/services/lambda/Cargo.toml b/rusoto/services/lambda/Cargo.toml index e6aa9b61a1c..6d6ff7c53f2 100644 --- a/rusoto/services/lambda/Cargo.toml +++ b/rusoto/services/lambda/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_lambda" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/lambda/README.md b/rusoto/services/lambda/README.md index 3e0c68ea177..672c132ab0d 100644 --- a/rusoto/services/lambda/README.md +++ b/rusoto/services/lambda/README.md @@ -23,9 +23,16 @@ To use `rusoto_lambda` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_lambda = "0.40.0" +rusoto_lambda = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
diff --git a/rusoto/services/lambda/src/custom/custom_tests.rs b/rusoto/services/lambda/src/custom/custom_tests.rs index bd5552c55c9..1cabcccaf8e 100644 --- a/rusoto/services/lambda/src/custom/custom_tests.rs +++ b/rusoto/services/lambda/src/custom/custom_tests.rs @@ -2,11 +2,13 @@ extern crate rusoto_mock; use bytes::Bytes; -use crate::generated::{GetPolicyRequest, GetPolicyResponse, Lambda, LambdaClient, InvocationRequest}; +use crate::generated::{ + GetPolicyRequest, GetPolicyResponse, InvocationRequest, Lambda, LambdaClient, +}; -use rusoto_core::Region; -use rusoto_core::signature::{SignedRequest,SignedRequestPayload}; use self::rusoto_mock::*; +use rusoto_core::signature::{SignedRequest, SignedRequestPayload}; +use rusoto_core::Region; #[test] fn serialize_get_policy_response() { @@ -14,13 +16,15 @@ fn serialize_get_policy_response() { policy: Some("policy".into()), ..GetPolicyResponse::default() }; - let mock = MockRequestDispatcher::with_status(200) - .with_json_body(policy.clone()); + let mock = MockRequestDispatcher::with_status(200).with_json_body(policy.clone()); let client = LambdaClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); - let result = client.get_policy(GetPolicyRequest { - function_name: "test-func".into(), - ..GetPolicyRequest::default() - }).sync().unwrap(); + let result = client + .get_policy(GetPolicyRequest { + function_name: "test-func".into(), + ..GetPolicyRequest::default() + }) + .sync() + .unwrap(); assert_eq!(result, policy); } @@ -40,7 +44,6 @@ fn should_parse_invocation_response() { panic!("request payload is not a buffer"); } assert_eq!("/2015-03-31/functions/foo/invocations", request.path); - }); let request = InvocationRequest { @@ -53,9 +56,11 @@ fn should_parse_invocation_response() { let client = LambdaClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let result = client.invoke(request).sync().unwrap(); - assert_eq!(Some(Bytes::from_static(br#"{"arbitrary":"json"}"#)), result.payload); + assert_eq!( + Some(Bytes::from_static(br#"{"arbitrary":"json"}"#)), + result.payload + ); assert_eq!(Some("foo bar baz".to_owned()), result.log_result); assert_eq!(Some("Handled".to_owned()), result.function_error); assert_eq!(Some(200), result.status_code); - -} \ No newline at end of file +} diff --git a/rusoto/services/lambda/src/custom/mod.rs b/rusoto/services/lambda/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/lambda/src/custom/mod.rs +++ b/rusoto/services/lambda/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/lambda/src/generated.rs b/rusoto/services/lambda/src/generated.rs index fc2ce051535..957dd4d190b 100644 --- a/rusoto/services/lambda/src/generated.rs +++ b/rusoto/services/lambda/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest; use serde_json; ///

<p>Limits that are related to concurrency and code storage. All file and storage sizes are in bytes.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccountLimit { ///

<p>The maximum size of your function's code and layers when they're extracted.</p>
#[serde(rename = "CodeSizeUnzipped")] @@ -53,7 +52,7 @@ pub struct AccountLimit { ///

<p>The number of functions and amount of storage in use.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccountUsage { ///

<p>The number of Lambda functions.</p>
#[serde(rename = "FunctionCount")] @@ -93,7 +92,7 @@ pub struct AddLayerVersionPermissionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddLayerVersionPermissionResponse { ///

<p>A unique identifier for the current revision of the policy.</p>
#[serde(rename = "RevisionId")] @@ -142,7 +141,7 @@ pub struct AddPermissionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddPermissionResponse { ///

<p>The permission statement that's added to the function policy.</p>
#[serde(rename = "Statement")] @@ -152,7 +151,7 @@ pub struct AddPermissionResponse { ///

<p>Provides configuration information about a Lambda function alias.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AliasConfiguration { ///

<p>The Amazon Resource Name (ARN) of the alias.</p>
#[serde(rename = "AliasArn")] @@ -190,7 +189,7 @@ pub struct AliasRoutingConfiguration { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Concurrency { ///

<p>The number of concurrent executions that are reserved for this function. For more information, see Managing Concurrency.</p>
#[serde(rename = "ReservedConcurrentExecutions")] @@ -235,6 +234,9 @@ pub struct CreateEventSourceMappingRequest { ///

<p>The name of the Lambda function.</p> <p>Name formats</p> <ul> <li> <p>Function name - MyFunction.</p> </li> <li> <p>Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.</p> </li> <li> <p>Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.</p> </li> <li> <p>Partial ARN - 123456789012:function:MyFunction.</p> </li> </ul> <p>The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.</p>
#[serde(rename = "FunctionName")] pub function_name: String, + #[serde(rename = "MaximumBatchingWindowInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option, ///

<p>The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.</p>
#[serde(rename = "StartingPosition")] #[serde(skip_serializing_if = "Option::is_none")] @@ -373,7 +375,7 @@ pub struct Environment { ///

<p>Error messages for environment variables that couldn't be applied.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentError { ///

<p>The error code.</p>
#[serde(rename = "ErrorCode")] @@ -387,7 +389,7 @@ pub struct EnvironmentError { ///

<p>The results of a configuration update that applied environment variables.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnvironmentResponse { ///

<p>Error messages for environment variables that couldn't be applied.</p>
#[serde(rename = "Error")] @@ -401,7 +403,7 @@ pub struct EnvironmentResponse { ///

<p>A mapping between an AWS resource and an AWS Lambda function. See CreateEventSourceMapping for details.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EventSourceMappingConfiguration { ///

<p>The maximum number of items to retrieve in a single batch.</p>
#[serde(rename = "BatchSize")] @@ -423,6 +425,9 @@ pub struct EventSourceMappingConfiguration { #[serde(rename = "LastProcessingResult")] #[serde(skip_serializing_if = "Option::is_none")] pub last_processing_result: Option, + #[serde(rename = "MaximumBatchingWindowInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option, ///

<p>The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.</p>
#[serde(rename = "State")] #[serde(skip_serializing_if = "Option::is_none")] @@ -465,7 +470,7 @@ pub struct FunctionCode { ///

<p>Details about a function's deployment package.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FunctionCodeLocation { ///

<p>A presigned URL that you can use to download the deployment package.</p>
#[serde(rename = "Location")] @@ -479,7 +484,7 @@ pub struct FunctionCodeLocation { ///

<p>Details about a function's configuration.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FunctionConfiguration { ///

<p>The SHA256 hash of the function's deployment package.</p>
#[serde(rename = "CodeSha256")] @@ -567,7 +572,7 @@ pub struct FunctionConfiguration { pub struct GetAccountSettingsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAccountSettingsResponse { ///

<p>Limits that are related to concurrency and code storage.</p>
#[serde(rename = "AccountLimit")] @@ -619,7 +624,7 @@ pub struct GetFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFunctionResponse { ///

<p>The deployment package of the function or version.</p>
#[serde(rename = "Code")] @@ -657,7 +662,7 @@ pub struct GetLayerVersionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLayerVersionPolicyResponse { ///

<p>The policy document.</p>
#[serde(rename = "Policy")] @@ -680,7 +685,7 @@ pub struct GetLayerVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLayerVersionResponse { ///

<p>The layer's compatible runtimes.</p>
#[serde(rename = "CompatibleRuntimes")] @@ -728,7 +733,7 @@ pub struct GetPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPolicyResponse { ///

<p>The resource-based policy.</p>
#[serde(rename = "Policy")] @@ -803,7 +808,7 @@ pub struct InvokeAsyncRequest { ///

<p>A success response (202 Accepted) indicates that the request is queued for invocation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InvokeAsyncResponse { ///

<p>The status code.</p>
#[serde(rename = "Status")] @@ -813,7 +818,7 @@ pub struct InvokeAsyncResponse { ///

<p>An AWS Lambda layer.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Layer { ///

<p>The Amazon Resource Name (ARN) of the function layer.</p>
#[serde(rename = "Arn")] @@ -853,7 +858,7 @@ pub struct LayerVersionContentInput { ///

<p>Details about a version of an AWS Lambda layer.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LayerVersionContentOutput { ///

<p>The SHA-256 hash of the layer archive.</p>
#[serde(rename = "CodeSha256")] @@ -871,7 +876,7 @@ pub struct LayerVersionContentOutput { ///

<p>Details about a version of an AWS Lambda layer.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LayerVersionsListItem { ///

<p>The layer's compatible runtimes.</p>
#[serde(rename = "CompatibleRuntimes")] @@ -901,7 +906,7 @@ pub struct LayerVersionsListItem { ///

<p>Details about an AWS Lambda layer.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LayersListItem { ///

<p>The newest version of the layer.</p>
#[serde(rename = "LatestMatchingVersion")] @@ -937,7 +942,7 @@ pub struct ListAliasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAliasesResponse { ///

<p>A list of aliases.</p>
#[serde(rename = "Aliases")] @@ -970,7 +975,7 @@ pub struct ListEventSourceMappingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEventSourceMappingsResponse { ///

<p>A list of event source mappings.</p>
#[serde(rename = "EventSourceMappings")] @@ -1004,7 +1009,7 @@ pub struct ListFunctionsRequest { ///

<p>A list of Lambda functions.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFunctionsResponse { ///

<p>A list of Lambda functions.</p>
#[serde(rename = "Functions")] @@ -1036,7 +1041,7 @@ pub struct ListLayerVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLayerVersionsResponse { ///

<p>A list of versions.</p>
#[serde(rename = "LayerVersions")] @@ -1065,7 +1070,7 @@ pub struct ListLayersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLayersResponse { ///

<p>A list of function layers.</p>
#[serde(rename = "Layers")] @@ -1085,7 +1090,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///

<p>The function's tags.</p>
#[serde(rename = "Tags")] @@ -1109,7 +1114,7 @@ pub struct ListVersionsByFunctionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListVersionsByFunctionResponse { ///

<p>The pagination token that's included if more results are available.</p>
#[serde(rename = "NextMarker")] @@ -1144,7 +1149,7 @@ pub struct PublishLayerVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PublishLayerVersionResponse { ///

<p>The layer's compatible runtimes.</p>
#[serde(rename = "CompatibleRuntimes")] @@ -1265,7 +1270,7 @@ pub struct TracingConfig { ///

<p>The function's AWS X-Ray tracing configuration.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TracingConfigResponse { ///

<p>The tracing mode.</p>
#[serde(rename = "Mode")] @@ -1323,6 +1328,9 @@ pub struct UpdateEventSourceMappingRequest { #[serde(rename = "FunctionName")] #[serde(skip_serializing_if = "Option::is_none")] pub function_name: Option, + #[serde(rename = "MaximumBatchingWindowInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option, ///

<p>The identifier of the event source mapping.</p>
#[serde(rename = "UUID")] pub uuid: String, @@ -1442,7 +1450,7 @@ pub struct VpcConfig { ///

<p>The VPC security groups and subnets that are attached to a Lambda function.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VpcConfigResponse { ///

<p>A list of VPC security group IDs.</p>
#[serde(rename = "SecurityGroupIds")] @@ -4002,10 +4010,7 @@ impl LambdaClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> LambdaClient { - LambdaClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4019,10 +4024,14 @@ impl LambdaClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - LambdaClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> LambdaClient { + LambdaClient { client, region } } } diff --git a/rusoto/services/lex-models/Cargo.toml b/rusoto/services/lex-models/Cargo.toml index bc387880cea..6a54ba11370 100644 --- a/rusoto/services/lex-models/Cargo.toml +++ b/rusoto/services/lex-models/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_lex_models" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/lex-models/README.md b/rusoto/services/lex-models/README.md index cda789f6135..4663659e878 100644 --- a/rusoto/services/lex-models/README.md +++ b/rusoto/services/lex-models/README.md @@ -23,9 +23,16 @@ To use `rusoto_lex_models` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_lex_models = "0.40.0" +rusoto_lex_models = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/lex-models/src/custom/mod.rs b/rusoto/services/lex-models/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/lex-models/src/custom/mod.rs +++ b/rusoto/services/lex-models/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/lex-models/src/generated.rs b/rusoto/services/lex-models/src/generated.rs index 49be1feba59..b7eafb7270b 100644 --- a/rusoto/services/lex-models/src/generated.rs +++ b/rusoto/services/lex-models/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest; use serde_json; ///

<p>Provides information about a bot alias.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BotAliasMetadata { ///

<p>The name of the bot to which the alias points.</p>
#[serde(rename = "botName")] @@ -61,7 +60,7 @@ pub struct BotAliasMetadata { ///

<p>Represents an association between an Amazon Lex bot and an external messaging platform.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BotChannelAssociation { ///

<p>An alias pointing to the specific version of the Amazon Lex bot to which this association is being made.</p>
#[serde(rename = "botAlias")] @@ -103,7 +102,7 @@ pub struct BotChannelAssociation { ///

<p>Provides information about a bot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BotMetadata { ///

<p>The date that the bot was created.</p>
#[serde(rename = "createdDate")] @@ -133,7 +132,7 @@ pub struct BotMetadata { ///

<p>Provides metadata for a built-in intent.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuiltinIntentMetadata { ///

<p>A unique identifier for the built-in intent. To find the signature for an intent, see Standard Built-in Intents in the Alexa Skills Kit.</p>
#[serde(rename = "signature")] @@ -147,7 +146,7 @@ pub struct BuiltinIntentMetadata { ///

<p>Provides information about a slot used in a built-in intent.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuiltinIntentSlot { ///

<p>A list of the slots defined for the intent.</p>
#[serde(rename = "name")] @@ -157,7 +156,7 @@ pub struct BuiltinIntentSlot { ///

<p>Provides information about a built-in slot type.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BuiltinSlotTypeMetadata { ///

<p>A unique identifier for the built-in slot type. To find the signature for a slot type, see Slot Type Reference in the Alexa Skills Kit.</p>
#[serde(rename = "signature")] @@ -192,7 +191,7 @@ pub struct CreateBotVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBotVersionResponse { ///

<p>The message that Amazon Lex uses to abort a conversation. For more information, see PutBot.</p>
#[serde(rename = "abortStatement")] @@ -268,7 +267,7 @@ pub struct CreateIntentVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIntentVersionResponse { ///

<p>Checksum of the intent version created.</p>
#[serde(rename = "checksum")] @@ -344,7 +343,7 @@ pub struct CreateSlotTypeVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSlotTypeVersionResponse { ///

<p>Checksum of the $LATEST version of the slot type.</p>
#[serde(rename = "checksum")] @@ -510,7 +509,7 @@ pub struct GetBotAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotAliasResponse { ///

<p>The name of the bot that the alias points to.</p>
#[serde(rename = "botName")] @@ -562,7 +561,7 @@ pub struct GetBotAliasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotAliasesResponse { ///

<p>An array of BotAliasMetadata objects, each describing a bot alias.</p>
#[serde(rename = "BotAliases")] @@ -588,7 +587,7 @@ pub struct GetBotChannelAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotChannelAssociationResponse { ///

<p>An alias pointing to the specific version of the Amazon Lex bot to which this association is being made.</p>
#[serde(rename = "botAlias")] @@ -651,7 +650,7 @@ pub struct GetBotChannelAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotChannelAssociationsResponse { ///

<p>An array of objects, one for each association, that provides information about the Amazon Lex bot and its association with the channel.</p>
#[serde(rename = "botChannelAssociations")] @@ -674,7 +673,7 @@ pub struct GetBotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotResponse { ///

<p>The message that Amazon Lex returns when the user elects to end the conversation without completing it. For more information, see PutBot.</p>
#[serde(rename = "abortStatement")] @@ -754,7 +753,7 @@ pub struct GetBotVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotVersionsResponse { ///

<p>An array of BotMetadata objects, one for each numbered version of the bot plus one for the $LATEST version.</p>
#[serde(rename = "bots")] @@ -783,7 +782,7 @@ pub struct GetBotsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBotsResponse { ///

<p>An array of botMetadata objects, with one entry for each bot.</p>
#[serde(rename = "bots")] @@ -803,7 +802,7 @@ pub struct GetBuiltinIntentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBuiltinIntentResponse { ///

<p>The unique identifier for a built-in intent.</p>
#[serde(rename = "signature")] @@ -840,7 +839,7 @@ pub struct GetBuiltinIntentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBuiltinIntentsResponse { ///

<p>An array of builtinIntentMetadata objects, one for each intent in the response.</p>
#[serde(rename = "intents")] @@ -873,7 +872,7 @@ pub struct GetBuiltinSlotTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBuiltinSlotTypesResponse { ///

<p>If the response is truncated, the response includes a pagination token that you can use in your next request to fetch the next page of slot types.</p>
#[serde(rename = "nextToken")] @@ -902,7 +901,7 @@ pub struct GetExportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetExportResponse { ///

<p>The status of the export.</p> <ul> <li> <p>IN_PROGRESS - The export is in progress.</p> </li> <li> <p>READY - The export is complete.</p> </li> <li> <p>FAILED - The export could not be completed.</p> </li> </ul>
#[serde(rename = "exportStatus")] @@ -942,7 +941,7 @@ pub struct GetImportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetImportResponse { ///

<p>A timestamp for the date and time that the import job was created.</p>
#[serde(rename = "createdDate")] @@ -985,7 +984,7 @@ pub struct GetIntentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntentResponse { ///

<p>Checksum of the intent.</p>
#[serde(rename = "checksum")] @@ -1065,7 +1064,7 @@ pub struct GetIntentVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntentVersionsResponse { ///

<p>An array of IntentMetadata objects, one for each numbered version of the intent plus one for the $LATEST version.</p>
#[serde(rename = "intents")] @@ -1094,7 +1093,7 @@ pub struct GetIntentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIntentsResponse { ///

<p>An array of Intent objects. For more information, see PutBot.</p>
#[serde(rename = "intents")] @@ -1117,7 +1116,7 @@ pub struct GetSlotTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSlotTypeResponse { ///

<p>Checksum of the $LATEST version of the slot type.</p>
#[serde(rename = "checksum")] @@ -1169,7 +1168,7 @@ pub struct GetSlotTypeVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSlotTypeVersionsResponse { ///

<p>A pagination token for fetching the next page of slot type versions. If the response to this call is truncated, Amazon Lex returns a pagination token in the response. To fetch the next page of versions, specify the pagination token in the next request.</p>
#[serde(rename = "nextToken")] @@ -1198,7 +1197,7 @@ pub struct GetSlotTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSlotTypesResponse { ///

<p>If the response is truncated, it includes a pagination token that you can specify in your next request to fetch the next page of slot types.</p>
#[serde(rename = "nextToken")] @@ -1224,7 +1223,7 @@ pub struct GetUtterancesViewRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetUtterancesViewResponse { ///

<p>The name of the bot for which utterance information was returned.</p>
#[serde(rename = "botName")] @@ -1249,7 +1248,7 @@ pub struct Intent { ///

<p>Provides information about an intent.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IntentMetadata { ///

<p>The date that the intent was created.</p>
#[serde(rename = "createdDate")] @@ -1325,7 +1324,7 @@ pub struct PutBotAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutBotAliasResponse { ///

<p>The name of the bot that the alias points to.</p>
#[serde(rename = "botName")] @@ -1406,7 +1405,7 @@ pub struct PutBotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutBotResponse { ///

<p>The message that Amazon Lex uses to abort a conversation. For more information, see PutBot.</p>
#[serde(rename = "abortStatement")] @@ -1528,7 +1527,7 @@ pub struct PutIntentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutIntentResponse { ///

<p>Checksum of the $LATEST version of the intent created or updated.</p>
#[serde(rename = "checksum")] @@ -1622,7 +1621,7 @@ pub struct PutSlotTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutSlotTypeResponse { ///

<p>Checksum of the $LATEST version of the slot type.</p>
#[serde(rename = "checksum")] @@ -1711,7 +1710,7 @@ pub struct Slot { ///

<p>Provides information about a slot type.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SlotTypeMetadata { ///

<p>The date that the slot type was created.</p>
#[serde(rename = "createdDate")] @@ -1754,7 +1753,7 @@ pub struct StartImportRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartImportResponse { ///

<p>A timestamp for the date and time that the import job was requested.</p>
#[serde(rename = "createdDate")] @@ -1796,7 +1795,7 @@ pub struct Statement { ///

<p>Provides information about a single utterance that was made to your bot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UtteranceData { ///

<p>The number of times that the utterance was processed.</p>
#[serde(rename = "count")] @@ -1822,7 +1821,7 @@ pub struct UtteranceData { ///

<p>Provides a list of utterances that have been made to a specific version of your bot. The list contains a maximum of 100 utterances.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UtteranceList { ///

<p>The version of the bot that processed the list.</p>
#[serde(rename = "botVersion")] @@ -4025,10 +4024,7 @@ impl LexModelsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> LexModelsClient { - LexModelsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4042,10 +4038,14 @@ impl LexModelsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - LexModelsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> LexModelsClient { + LexModelsClient { client, region } } } diff --git a/rusoto/services/lex-runtime/Cargo.toml b/rusoto/services/lex-runtime/Cargo.toml index 18f9ed2b4c8..abdae4686ba 100644 --- a/rusoto/services/lex-runtime/Cargo.toml +++ b/rusoto/services/lex-runtime/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_lex_runtime" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/lex-runtime/README.md b/rusoto/services/lex-runtime/README.md index d1f434abb6b..926d5ed5bfd 100644 --- a/rusoto/services/lex-runtime/README.md +++ b/rusoto/services/lex-runtime/README.md @@ -23,9 +23,16 @@ To use `rusoto_lex_runtime` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_lex_runtime = "0.40.0" +rusoto_lex_runtime = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
diff --git a/rusoto/services/lex-runtime/src/custom/custom_tests.rs b/rusoto/services/lex-runtime/src/custom/custom_tests.rs new file mode 100644 index 00000000000..94b944ffba1 --- /dev/null +++ b/rusoto/services/lex-runtime/src/custom/custom_tests.rs @@ -0,0 +1,52 @@ +extern crate rusoto_mock; + +use crate::generated::{LexRuntime, LexRuntimeClient, PostTextRequest, PostTextResponse}; +use rusoto_core::Region; +use std::collections::HashMap; + +use self::rusoto_mock::*; + +#[test] +fn test_post_text_resposnse_serialization() { + let mock_resp_body = r#"{ + "dialogState": "ElicitSlot", + "intentName": "BookCar", + "message": "In what city do you need to rent a car?", + "messageFormat": "PlainText", + "responseCard": null, + "sessionAttributes": {}, + "slotToElicit": "PickUpCity", + "slots": { + "CarType": null, + "PickUpCity": "Boston" + } + }"#; + let mock_request = MockRequestDispatcher::with_status(200).with_body(mock_resp_body); + + let lex_client = + LexRuntimeClient::new_with(mock_request, MockCredentialsProvider, Region::UsEast1); + + let post_text_req = PostTextRequest { + input_text: "Book a car".to_owned(), + user_id: "rs".to_owned(), + ..Default::default() + }; + + let mut slots = HashMap::new(); + slots.insert("CarType".to_owned(), None); + slots.insert("PickUpCity".to_owned(), Some("Boston".to_owned())); + + let expected = PostTextResponse { + dialog_state: Some("ElicitSlot".to_owned()), + intent_name: Some("BookCar".to_owned()), + message: Some("In what city do you need to rent a car?".to_owned()), + message_format: Some("PlainText".to_owned()), + slot_to_elicit: Some("PickUpCity".to_owned()), + slots: Some(slots), + response_card: None, + session_attributes: Some(HashMap::new()), + }; + + let result: PostTextResponse = lex_client.post_text(post_text_req).sync().unwrap(); + assert_eq!(result, expected); +} diff --git a/rusoto/services/lex-runtime/src/custom/mod.rs b/rusoto/services/lex-runtime/src/custom/mod.rs index e69de29bb2d..e4234693714 100644 --- a/rusoto/services/lex-runtime/src/custom/mod.rs +++ b/rusoto/services/lex-runtime/src/custom/mod.rs @@ -0,0 +1,2 @@ +#[cfg(test)] +mod custom_tests; diff --git a/rusoto/services/lex-runtime/src/generated.rs b/rusoto/services/lex-runtime/src/generated.rs index 893237915fd..c312f86f5aa 100644 --- a/rusoto/services/lex-runtime/src/generated.rs +++ b/rusoto/services/lex-runtime/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

<p>Represents an option to be shown on the client platform (Facebook, Slack, etc.).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Button { ///

<p>Text that is visible to the user on the button.</p>
#[serde(rename = "text")] @@ -36,9 +35,75 @@ pub struct Button { pub value: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteSessionRequest { + ///

<p>The alias in use for the bot that contains the session data.</p>
+ #[serde(rename = "botAlias")] + pub bot_alias: String, + ///

<p>The name of the bot that contains the session data.</p>
+ #[serde(rename = "botName")] + pub bot_name: String, + ///

<p>The identifier of the user associated with the session data.</p>
+ #[serde(rename = "userId")] + pub user_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DeleteSessionResponse { + ///

<p>The alias in use for the bot associated with the session data.</p>
+ #[serde(rename = "botAlias")] + #[serde(skip_serializing_if = "Option::is_none")] + pub bot_alias: Option, + ///

<p>The name of the bot associated with the session data.</p>
+ #[serde(rename = "botName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub bot_name: Option, + ///

<p>The unique identifier for the session.</p>
+ #[serde(rename = "sessionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_id: Option, + ///

<p>The ID of the client application user.</p>
+ #[serde(rename = "userId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_id: Option, +} + +///

<p>Describes the next action that the bot should take in its interaction with the user and provides information about the context in which the action takes place. Use the DialogAction data type to set the interaction to a specific state, or to return the interaction to a previous state.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DialogAction { + ///

<p>The fulfillment state of the intent. The possible values are:</p> <ul> <li> <p>Failed - The Lambda function associated with the intent failed to fulfill the intent.</p> </li> <li> <p>Fulfilled - The intent has been fulfilled by the Lambda function associated with the intent.</p> </li> <li> <p>ReadyForFulfillment - All of the information necessary for the intent is present and the intent is ready to be fulfilled by the client application.</p> </li> </ul>
+ #[serde(rename = "fulfillmentState")] + #[serde(skip_serializing_if = "Option::is_none")] + pub fulfillment_state: Option, + ///

<p>The name of the intent.</p>
+ #[serde(rename = "intentName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub intent_name: Option, + ///

<p>The message that should be shown to the user. If you don't specify a message, Amazon Lex will use the message configured for the intent.</p>
+ #[serde(rename = "message")] + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + ///
<ul> <li> <p>PlainText - The message contains plain UTF-8 text.</p> </li> <li> <p>CustomPayload - The message is a custom format for the client.</p> </li> <li> <p>SSML - The message contains text formatted for voice output.</p> </li> <li> <p>Composite - The message contains an escaped JSON object containing one or more messages. For more information, see Message Groups.</p> </li> </ul>
+ #[serde(rename = "messageFormat")] + #[serde(skip_serializing_if = "Option::is_none")] + pub message_format: Option, + ///

<p>The name of the slot that should be elicited from the user.</p>
+ #[serde(rename = "slotToElicit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub slot_to_elicit: Option, + ///

<p>Map of the slots that have been gathered and their values.</p>
+ #[serde(rename = "slots")] + #[serde(skip_serializing_if = "Option::is_none")] + pub slots: Option<::std::collections::HashMap>, + ///

<p>The next action that the bot should take in its interaction with the user. The possible values are:</p> <ul> <li> <p>ConfirmIntent - The next action is asking the user if the intent is complete and ready to be fulfilled. This is a yes/no question such as "Place the order?"</p> </li> <li> <p>Close - Indicates that there will not be a response from the user. For example, the statement "Your order has been placed" does not require a response.</p> </li> <li> <p>Delegate - The next action is determined by Amazon Lex.</p> </li> <li> <p>ElicitIntent - The next action is to determine the intent that the user wants to fulfill.</p> </li> <li> <p>ElicitSlot - The next action is to elicit a slot value from the user.</p> </li> </ul>
+ #[serde(rename = "type")] + pub type_: String, +} + ///

<p>Represents an option rendered to the user when a prompt is shown. It could be an image, a button, a link, or text.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenericAttachment { ///

<p>The URL of an attachment to the response card.</p>
#[serde(rename = "attachmentLinkUrl")] @@ -62,9 +127,72 @@ pub struct GenericAttachment { pub title: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetSessionRequest { + ///

<p>The alias in use for the bot that contains the session data.</p>
+ #[serde(rename = "botAlias")] + pub bot_alias: String, + ///

<p>The name of the bot that contains the session data.</p>
+ #[serde(rename = "botName")] + pub bot_name: String, + ///

<p>The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot.</p>
+ #[serde(rename = "userId")] + pub user_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetSessionResponse { + ///

<p>Describes the current state of the bot.</p>
+ #[serde(rename = "dialogAction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dialog_action: Option, + ///

<p>An array of information about the intents used in the session. The array can contain a maximum of three summaries. If more than three intents are used in the session, the recentIntentSummaryView operation contains information about the last three intents used.</p>
+ #[serde(rename = "recentIntentSummaryView")] + #[serde(skip_serializing_if = "Option::is_none")] + pub recent_intent_summary_view: Option>, + ///

<p>Map of key/value pairs representing the session-specific context information. It contains application information passed between Amazon Lex and a client application.</p>
+ #[serde(rename = "sessionAttributes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_attributes: Option<::std::collections::HashMap>, + ///

<p>A unique identifier for the session.</p>
+ #[serde(rename = "sessionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_id: Option, +} + +///

<p>Provides information about the state of an intent. You can use this information to get the current state of an intent so that you can process the intent, or so that you can return the intent to its previous state.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct IntentSummary { + ///

<p>The status of the intent after the user responds to the confirmation prompt. If the user confirms the intent, Amazon Lex sets this field to Confirmed. If the user denies the intent, Amazon Lex sets this value to Denied. The possible values are:</p> <ul> <li> <p>Confirmed - The user has responded "Yes" to the confirmation prompt, confirming that the intent is complete and that it is ready to be fulfilled.</p> </li> <li> <p>Denied - The user has responded "No" to the confirmation prompt.</p> </li> <li> <p>None - The user has never been prompted for confirmation; or, the user was prompted but did not confirm or deny the prompt.</p> </li> </ul>
+ #[serde(rename = "confirmationStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub confirmation_status: Option, + ///

<p>The next action that the bot should take in its interaction with the user. The possible values are:</p> <ul> <li> <p>ConfirmIntent - The next action is asking the user if the intent is complete and ready to be fulfilled. This is a yes/no question such as "Place the order?"</p> </li> <li> <p>Close - Indicates that there will not be a response from the user. For example, the statement "Your order has been placed" does not require a response.</p> </li> <li> <p>ElicitIntent - The next action is to determine the intent that the user wants to fulfill.</p> </li> <li> <p>ElicitSlot - The next action is to elicit a slot value from the user.</p> </li> </ul>
+ #[serde(rename = "dialogActionType")] + pub dialog_action_type: String, + ///

<p>The fulfillment state of the intent. The possible values are:</p> <ul> <li> <p>Failed - The Lambda function associated with the intent failed to fulfill the intent.</p> </li> <li> <p>Fulfilled - The intent has been fulfilled by the Lambda function associated with the intent.</p> </li> <li> <p>ReadyForFulfillment - All of the information necessary for the intent is present and the intent is ready to be fulfilled by the client application.</p> </li> </ul>
+ #[serde(rename = "fulfillmentState")] + #[serde(skip_serializing_if = "Option::is_none")] + pub fulfillment_state: Option, + ///

The name of the intent.

+ #[serde(rename = "intentName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub intent_name: Option, + ///

The next slot to elicit from the user. If there is no slot to elicit, the field is blank.

+ #[serde(rename = "slotToElicit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub slot_to_elicit: Option, + ///

Map of the slots that have been gathered and their values.

+ #[serde(rename = "slots")] + #[serde(skip_serializing_if = "Option::is_none")] + pub slots: Option<::std::collections::HashMap>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PostContentRequest { - ///

You pass this value as the Accept HTTP header.

The message Amazon Lex returns in the response can be either text or speech based on the Accept HTTP header value in the request.

  • If the value is text/plain; charset=utf-8, Amazon Lex returns text in the response.

  • If the value begins with audio/, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech (using the configuration you specified in the Accept header). For example, if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format.

    The following are the accepted values:

    • audio/mpeg

    • audio/ogg

    • audio/pcm

    • text/plain; charset=utf-8

    • audio/* (defaults to mpeg)

+ ///

You pass this value as the Accept HTTP header.

The message Amazon Lex returns in the response can be either text or speech based on the Accept HTTP header value in the request.

  • If the value is text/plain; charset=utf-8, Amazon Lex returns text in the response.

  • If the value begins with audio/, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech (using the configuration you specified in the Accept header). For example, if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format.

  • If the value is audio/pcm, the speech returned is audio/pcm in 16-bit, little endian format.

  • The following are the accepted values:

    • audio/mpeg

    • audio/ogg

    • audio/pcm

    • text/plain; charset=utf-8

    • audio/* (defaults to mpeg)

#[serde(rename = "accept")] #[serde(skip_serializing_if = "Option::is_none")] pub accept: Option, @@ -85,11 +213,11 @@ pub struct PostContentRequest { default )] pub input_stream: bytes::Bytes, - ///

You pass this value as the x-amz-lex-request-attributes HTTP header.

Request-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the requestAttributes and sessionAttributes headers is limited to 12 KB.

The namespace x-amz-lex: is reserved for special attributes. Don't create any request attributes with the prefix x-amz-lex:.

For more information, see Setting Request Attributes.

+ ///

You pass this value as the x-amz-lex-request-attributes HTTP header.

Request-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the requestAttributes and sessionAttributes headers is limited to 12 KB.

The namespace x-amz-lex: is reserved for special attributes. Don't create any request attributes with the prefix x-amz-lex:.

For more information, see Setting Request Attributes.
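Because the header form of these attributes is easy to get wrong, here is a minimal sketch of producing the value described above. It assumes the `serde_json` and `base64` crates; the helper name is ours, not part of the generated client:

```rust
use std::collections::HashMap;

/// Build the value for `request_attributes` on `PostContentRequest`:
/// a JSON-serialized map of string keys and values, then base64-encoded,
/// as the doc above specifies.
fn encode_lex_attributes(attrs: &HashMap<String, String>) -> String {
    let json = serde_json::to_string(attrs).expect("maps of strings always serialize");
    base64::encode(&json)
}
```

The result is what ends up in the `x-amz-lex-request-attributes` HTTP header; remember the combined 12 KB limit with `sessionAttributes`.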

#[serde(rename = "requestAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub request_attributes: Option, - ///

You pass this value as the x-amz-lex-session-attributes HTTP header.

Application-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the sessionAttributes and requestAttributes headers is limited to 12 KB.

For more information, see Setting Session Attributes.

+ ///

You pass this value as the x-amz-lex-session-attributes HTTP header.

Application-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the sessionAttributes and requestAttributes headers is limited to 12 KB.

For more information, see Setting Session Attributes.

#[serde(rename = "sessionAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub session_attributes: Option, @@ -110,7 +238,7 @@ pub struct PostContentResponse { pub input_transcript: Option, ///

Current user intent that Amazon Lex is aware of.

     pub intent_name: Option<String>,
-    ///

The message to convey to the user. The message can come from the bot's configuration or from a Lambda function.

If the intent is not configured with a Lambda function, or if the Lambda function returned Delegate as the dialogAction.type its response, Amazon Lex decides on the next course of action and selects an appropriate message from the bot's configuration based on the current interaction context. For example, if Amazon Lex isn't able to understand user input, it uses a clarification prompt message.

When you create an intent you can assign messages to groups. When messages are assigned to groups Amazon Lex returns one message from each group in the response. The message field is an escaped JSON string containing the messages. For more information about the structure of the JSON string returned, see msg-prompts-formats.

If the Lambda function returns a message, Amazon Lex passes it to the client in its response.

+ ///

The message to convey to the user. The message can come from the bot's configuration or from a Lambda function.

If the intent is not configured with a Lambda function, or if the Lambda function returned Delegate as the dialogAction.type in its response, Amazon Lex decides on the next course of action and selects an appropriate message from the bot's configuration based on the current interaction context. For example, if Amazon Lex isn't able to understand user input, it uses a clarification prompt message.

When you create an intent you can assign messages to groups. When messages are assigned to groups Amazon Lex returns one message from each group in the response. The message field is an escaped JSON string containing the messages. For more information about the structure of the JSON string returned, see msg-prompts-formats.

If the Lambda function returns a message, Amazon Lex passes it to the client in its response.

     pub message: Option<String>,
     ///

The format of the response message. One of the following values:

  • PlainText - The message contains plain UTF-8 text.

  • CustomPayload - The message is a custom format for the client.

  • SSML - The message contains text formatted for voice output.

  • Composite - The message contains an escaped JSON object containing one or more messages from the groups that messages were assigned to when the intent was created.

     pub message_format: Option<String>,
@@ -118,7 +246,7 @@ pub struct PostContentResponse {
     pub session_attributes: Option<String>,
     ///

If the dialogState value is ElicitSlot, returns the name of the slot for which Amazon Lex is eliciting a value.

     pub slot_to_elicit: Option<String>,
-    ///

Map of zero or more intent slots (name/value pairs) Amazon Lex detected from the user input during the conversation.

Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the valueSelectionStrategy selected when the slot type was created or updated. If valueSelectionStrategy is set to ORIGINAL_VALUE, the value provided by the user is returned, if the user value is similar to the slot values. If valueSelectionStrategy is set to TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a valueSelectionStrategy, the default is ORIGINAL_VALUE.

+ ///

Map of zero or more intent slots (name/value pairs) Amazon Lex detected from the user input during the conversation. The field is base-64 encoded.

Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the valueSelectionStrategy selected when the slot type was created or updated. If valueSelectionStrategy is set to ORIGINAL_VALUE, the value provided by the user is returned, if the user value is similar to the slot values. If valueSelectionStrategy is set to TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a valueSelectionStrategy, the default is ORIGINAL_VALUE.
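Going the other direction, a sketch of decoding this field under the same assumptions (the new doc text above notes the field is base-64 encoded; `base64` and `serde_json` crates assumed, helper name illustrative):

```rust
use std::collections::HashMap;

/// Decode `PostContentResponse::slots`: base-64 encoded JSON per the doc
/// above. Returns None if either decoding or parsing fails.
fn decode_slots(raw: &str) -> Option<HashMap<String, String>> {
    let bytes = base64::decode(raw).ok()?;
    serde_json::from_slice(&bytes).ok()
}
```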

     pub slots: Option<String>,
 }
@@ -133,11 +261,11 @@ pub struct PostTextRequest {
     ///

The text that the user entered (Amazon Lex interprets this text).

#[serde(rename = "inputText")] pub input_text: String, - ///

Request-specific information passed between Amazon Lex and a client application.

The namespace x-amz-lex: is reserved for special attributes. Don't create any request attributes with the prefix x-amz-lex:.

For more information, see Setting Request Attributes.

+ ///

Request-specific information passed between Amazon Lex and a client application.

The namespace x-amz-lex: is reserved for special attributes. Don't create any request attributes with the prefix x-amz-lex:.

For more information, see Setting Request Attributes.

#[serde(rename = "requestAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub request_attributes: Option<::std::collections::HashMap>, - ///

Application-specific information passed between Amazon Lex and a client application.

For more information, see Setting Session Attributes.

+ ///

Application-specific information passed between Amazon Lex and a client application.

For more information, see Setting Session Attributes.

#[serde(rename = "sessionAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub session_attributes: Option<::std::collections::HashMap>, @@ -147,7 +275,7 @@ pub struct PostTextRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PostTextResponse { ///

Identifies the current state of the user interaction. Amazon Lex returns one of the following values as dialogState. The client can optionally use this information to customize the user interface.

  • ElicitIntent - Amazon Lex wants to elicit user intent.

    For example, a user might utter an intent ("I want to order a pizza"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialogState.

  • ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response.

    For example, Amazon Lex wants user confirmation before fulfilling an intent.

    Instead of a simple "yes" or "no," a user might respond with additional information. For example, "yes, but make it thick crust pizza" or "no, I want to order a drink". Amazon Lex can process such additional information (in these examples, update the crust type slot value, or change intent from OrderPizza to OrderDrink).

  • ElicitSlot - Amazon Lex is expecting a slot value for the current intent.

    For example, suppose that in the response Amazon Lex sends this message: "What size pizza would you like?". A user might reply with the slot value (e.g., "medium"). The user might also provide additional information in the response (e.g., "medium thick crust pizza"). Amazon Lex can process such additional information appropriately.

  • Fulfilled - Conveys that the Lambda function configured for the intent has successfully fulfilled the intent.

  • ReadyForFulfillment - Conveys that the client has to fulfill the intent.

  • Failed - Conveys that the conversation with the user failed.

    This can happen for various reasons including that the user did not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or the Lambda function failed to fulfill the intent.
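Since `dialogState` is a plain `Option<String>` in this crate version, client code typically branches on it directly. A hedged sketch (the crate name for this service is assumed to be `rusoto_lex_runtime`; the handling in each arm is application-specific):

```rust
use rusoto_lex_runtime::PostTextResponse;

// Illustrative dispatch on the dialogState values listed above.
fn handle_dialog_state(response: &PostTextResponse) {
    match response.dialog_state.as_ref().map(String::as_str) {
        Some("ElicitIntent") | Some("ConfirmIntent") | Some("ElicitSlot") => {
            // Keep the conversation going: show `response.message` to the user.
        }
        Some("ReadyForFulfillment") => {
            // The client application is now responsible for fulfilling the intent.
        }
        Some("Fulfilled") => {
            // Done; `response.message` carries the conclusion statement, if any.
        }
        _ => {
            // "Failed", an unrecognized value, or no state returned.
        }
    }
}
```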

#[serde(rename = "dialogState")] @@ -180,12 +308,61 @@ pub struct PostTextResponse { ///

The intent slots that Amazon Lex detected from the user input in the conversation.

Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the valueSelectionStrategy selected when the slot type was created or updated. If valueSelectionStrategy is set to ORIGINAL_VALUE, the value provided by the user is returned, if the user value is similar to the slot values. If valueSelectionStrategy is set to TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a valueSelectionStrategy, the default is ORIGINAL_VALUE.

#[serde(rename = "slots")] #[serde(skip_serializing_if = "Option::is_none")] - pub slots: Option<::std::collections::HashMap>, + pub slots: Option<::std::collections::HashMap>>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct PutSessionRequest { + ///

The message that Amazon Lex returns in the response can be either text or speech, depending on the value of this field.

  • If the value is text/plain; charset=utf-8, Amazon Lex returns text in the response.

  • If the value begins with audio/, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech in the configuration that you specify. For example, if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format.

  • If the value is audio/pcm, the speech is returned as audio/pcm in 16-bit, little endian format.

  • The following are the accepted values:

    • audio/mpeg

    • audio/ogg

    • audio/pcm

    • audio/* (defaults to mpeg)

    • text/plain; charset=utf-8

+ #[serde(rename = "accept")] + #[serde(skip_serializing_if = "Option::is_none")] + pub accept: Option, + ///

The alias in use for the bot that contains the session data.

+ #[serde(rename = "botAlias")] + pub bot_alias: String, + ///

The name of the bot that contains the session data.

+ #[serde(rename = "botName")] + pub bot_name: String, + ///

Sets the next action that the bot should take to fulfill the conversation.

+ #[serde(rename = "dialogAction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dialog_action: Option, + ///

Map of key/value pairs representing the session-specific context information. It contains application information passed between Amazon Lex and a client application.

+ #[serde(rename = "sessionAttributes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_attributes: Option<::std::collections::HashMap>, + ///

The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot.

+ #[serde(rename = "userId")] + pub user_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct PutSessionResponse { + ///

The audio version of the message to convey to the user.

+    pub audio_stream: Option<bytes::Bytes>,
+    ///

Content type as specified in the Accept HTTP header in the request.

+    pub content_type: Option<String>,
+    ///

  • ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response to confirm the intent before fulfilling an intent.

  • ElicitIntent - Amazon Lex wants to elicit the user's intent.

  • ElicitSlot - Amazon Lex is expecting the value of a slot for the current intent.

  • Failed - Conveys that the conversation with the user has failed. This can happen for various reasons, including that the user did not provide an appropriate response to prompts from the service, or that the Lambda function failed to fulfill the intent.

  • Fulfilled - Conveys that the Lambda function has successfully fulfilled the intent.

  • ReadyForFulfillment - Conveys that the client has to fulfill the intent.

+    pub dialog_state: Option<String>,
+    ///

The name of the current intent.

+    pub intent_name: Option<String>,
+    ///

The next message that should be presented to the user.

+    pub message: Option<String>,
+    ///

The format of the response message. One of the following values:

  • PlainText - The message contains plain UTF-8 text.

  • CustomPayload - The message is a custom format for the client.

  • SSML - The message contains text formatted for voice output.

  • Composite - The message contains an escaped JSON object containing one or more messages from the groups that messages were assigned to when the intent was created.

+    pub message_format: Option<String>,
+    ///

Map of key/value pairs representing session-specific context information.

+    pub session_attributes: Option<String>,
+    ///

A unique identifier for the session.

+    pub session_id: Option<String>,
+    ///

If the dialogState is ElicitSlot, returns the name of the slot for which Amazon Lex is eliciting a value.

+    pub slot_to_elicit: Option<String>,
+    ///

Map of zero or more intent slots Amazon Lex detected from the user input during the conversation.

Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the valueSelectionStrategy selected when the slot type was created or updated. If valueSelectionStrategy is set to ORIGINAL_VALUE, the value provided by the user is returned, if the user value is similar to the slot values. If valueSelectionStrategy is set to TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a valueSelectionStrategy, the default is ORIGINAL_VALUE.

+    pub slots: Option<String>,
+}
+
 ///

If you configure a response card when creating your bots, Amazon Lex substitutes the session attributes and slot values that are available, and then returns it. The response card can also come from a Lambda function ( dialogCodeHook and fulfillmentActivity on an intent).

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ResponseCard {
     ///

The content type of the response.

#[serde(rename = "contentType")] @@ -201,6 +378,114 @@ pub struct ResponseCard { pub version: Option, } +/// Errors returned by DeleteSession +#[derive(Debug, PartialEq)] +pub enum DeleteSessionError { + ///

Request validation failed, there is no usable message in the context, or the bot build failed, is still in progress, or contains unbuilt changes.

+    BadRequest(String),
+    ///

Two clients are using the same AWS account, Amazon Lex bot, and user ID.

+    Conflict(String),
+    ///

Internal service error. Retry the call.

+    InternalFailure(String),
+    ///

Exceeded a limit.

+    LimitExceeded(String),
+    ///

The resource (such as the Amazon Lex bot or an alias) that is referred to is not found.

+    NotFound(String),
+}
+
+impl DeleteSessionError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteSessionError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(DeleteSessionError::BadRequest(err.msg))
+                }
+                "ConflictException" => {
+                    return RusotoError::Service(DeleteSessionError::Conflict(err.msg))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(DeleteSessionError::InternalFailure(err.msg))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(DeleteSessionError::LimitExceeded(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(DeleteSessionError::NotFound(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteSessionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteSessionError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteSessionError::BadRequest(ref cause) => cause,
+            DeleteSessionError::Conflict(ref cause) => cause,
+            DeleteSessionError::InternalFailure(ref cause) => cause,
+            DeleteSessionError::LimitExceeded(ref cause) => cause,
+            DeleteSessionError::NotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetSession
+#[derive(Debug, PartialEq)]
+pub enum GetSessionError {
+    ///

Request validation failed, there is no usable message in the context, or the bot build failed, is still in progress, or contains unbuilt changes.

+    BadRequest(String),
+    ///

Internal service error. Retry the call.

+    InternalFailure(String),
+    ///

Exceeded a limit.

+    LimitExceeded(String),
+    ///

The resource (such as the Amazon Lex bot or an alias) that is referred to is not found.

+    NotFound(String),
+}
+
+impl GetSessionError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetSessionError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadRequestException" => {
+                    return RusotoError::Service(GetSessionError::BadRequest(err.msg))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(GetSessionError::InternalFailure(err.msg))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(GetSessionError::LimitExceeded(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(GetSessionError::NotFound(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetSessionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetSessionError {
+    fn description(&self) -> &str {
+        match *self {
+            GetSessionError::BadRequest(ref cause) => cause,
+            GetSessionError::InternalFailure(ref cause) => cause,
+            GetSessionError::LimitExceeded(ref cause) => cause,
+            GetSessionError::NotFound(ref cause) => cause,
+        }
+    }
+}
 /// Errors returned by PostContent
 #[derive(Debug, PartialEq)]
 pub enum PostContentError {
@@ -369,16 +654,109 @@ impl Error for PostTextError {
         }
     }
 }
+/// Errors returned by PutSession
+#[derive(Debug, PartialEq)]
+pub enum PutSessionError {
+    ///

Either the Amazon Lex bot is still building, or one of the dependent services (Amazon Polly, AWS Lambda) failed with an internal service error.

+    BadGateway(String),
+    ///

Request validation failed, there is no usable message in the context, or the bot build failed, is still in progress, or contains unbuilt changes.

+    BadRequest(String),
+    ///

Two clients are using the same AWS account, Amazon Lex bot, and user ID.

+    Conflict(String),
+    ///

One of the dependencies, such as AWS Lambda or Amazon Polly, threw an exception. For example,

  • If Amazon Lex does not have sufficient permissions to call a Lambda function.

  • If a Lambda function takes longer than 30 seconds to execute.

  • If a fulfillment Lambda function returns a Delegate dialog action without removing any slot values.

+    DependencyFailed(String),
+    ///

Internal service error. Retry the call.

+    InternalFailure(String),
+    ///

Exceeded a limit.

+    LimitExceeded(String),
+    ///

The accept header in the request does not have a valid value.

+    NotAcceptable(String),
+    ///

The resource (such as the Amazon Lex bot or an alias) that is referred to is not found.

+    NotFound(String),
+}
+
+impl PutSessionError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<PutSessionError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "BadGatewayException" => {
+                    return RusotoError::Service(PutSessionError::BadGateway(err.msg))
+                }
+                "BadRequestException" => {
+                    return RusotoError::Service(PutSessionError::BadRequest(err.msg))
+                }
+                "ConflictException" => {
+                    return RusotoError::Service(PutSessionError::Conflict(err.msg))
+                }
+                "DependencyFailedException" => {
+                    return RusotoError::Service(PutSessionError::DependencyFailed(err.msg))
+                }
+                "InternalFailureException" => {
+                    return RusotoError::Service(PutSessionError::InternalFailure(err.msg))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(PutSessionError::LimitExceeded(err.msg))
+                }
+                "NotAcceptableException" => {
+                    return RusotoError::Service(PutSessionError::NotAcceptable(err.msg))
+                }
+                "NotFoundException" => {
+                    return RusotoError::Service(PutSessionError::NotFound(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for PutSessionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for PutSessionError {
+    fn description(&self) -> &str {
+        match *self {
+            PutSessionError::BadGateway(ref cause) => cause,
+            PutSessionError::BadRequest(ref cause) => cause,
+            PutSessionError::Conflict(ref cause) => cause,
+            PutSessionError::DependencyFailed(ref cause) => cause,
+            PutSessionError::InternalFailure(ref cause) => cause,
+            PutSessionError::LimitExceeded(ref cause) => cause,
+            PutSessionError::NotAcceptable(ref cause) => cause,
+            PutSessionError::NotFound(ref cause) => cause,
+        }
+    }
+}
 /// Trait representing the capabilities of the Amazon Lex Runtime Service API. Amazon Lex Runtime Service clients implement this trait.
 pub trait LexRuntime {
-    ///

Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot.

The PostContent operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications.

In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages:

  • For a user input "I would like a pizza," Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?".

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: "Order the pizza?".

  • After the user replies "Yes" to the confirmation prompt, Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the message, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • x-amz-lex-dialog-state header set to ElicitSlot

    • x-amz-lex-intent-name header set to the intent name in the current context

    • x-amz-lex-slot-to-elicit header set to the slot name for which the message is eliciting information

    • x-amz-lex-slots header set to a map of slots configured for the intent with their current values

  • If the message is a confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation and the x-amz-lex-slot-to-elicit header is omitted.

  • If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the x-amz-dialog-state header is set to ElicitIntent and the x-amz-slot-to-elicit header is omitted.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

+ ///

Removes session information for a specified bot, alias, and user ID.

+    fn delete_session(
+        &self,
+        input: DeleteSessionRequest,
+    ) -> RusotoFuture<DeleteSessionResponse, DeleteSessionError>;
+
+    ///

Returns session information for a specified bot, alias, and user ID.
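A sketch of pairing the two session calls declared here, using the blocking `.sync()` adapter on `RusotoFuture`. The crate name (`rusoto_lex_runtime`) and the bot, alias, and user values are assumptions for illustration:

```rust
use rusoto_core::Region;
use rusoto_lex_runtime::{DeleteSessionRequest, GetSessionRequest, LexRuntime, LexRuntimeClient};

fn main() {
    let client = LexRuntimeClient::new(Region::UsEast1);

    // Inspect the session first (bot/alias/user values are hypothetical).
    let get = GetSessionRequest {
        bot_name: "OrderFlowers".to_owned(),
        bot_alias: "prod".to_owned(),
        user_id: "user-1234".to_owned(),
        ..Default::default()
    };
    if let Ok(session) = client.get_session(get).sync() {
        println!("session {:?}, pending action {:?}", session.session_id, session.dialog_action);
    }

    // Then remove it.
    let delete = DeleteSessionRequest {
        bot_name: "OrderFlowers".to_owned(),
        bot_alias: "prod".to_owned(),
        user_id: "user-1234".to_owned(),
    };
    let _ = client.delete_session(delete).sync();
}
```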

+    fn get_session(
+        &self,
+        input: GetSessionRequest,
+    ) -> RusotoFuture<GetSessionResponse, GetSessionError>;
+
+    ///

Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot.

The PostContent operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications.

In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages:

  • For a user input "I would like a pizza," Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?".

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: "Order the pizza?".

  • After the user replies "Yes" to the confirmation prompt, Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the message, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • x-amz-lex-dialog-state header set to ElicitSlot

    • x-amz-lex-intent-name header set to the intent name in the current context

    • x-amz-lex-slot-to-elicit header set to the slot name for which the message is eliciting information

    • x-amz-lex-slots header set to a map of slots configured for the intent with their current values

  • If the message is a confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation and the x-amz-lex-slot-to-elicit header is omitted.

  • If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the x-amz-dialog-state header is set to ElicitIntent and the x-amz-slot-to-elicit header is omitted.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

     fn post_content(
         &self,
         input: PostContentRequest,
     ) -> RusotoFuture<PostContentResponse, PostContentError>;
 
-    ///

Sends user input (text-only) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot.

In response, Amazon Lex returns the next message to convey to the user an optional responseCard to display. Consider the following example messages:

  • For a user input "I would like a pizza", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?"

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation "Proceed with the pizza order?".

  • After the user replies to a confirmation prompt with a "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a "yes" or "no" user response. In addition to the message, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the slotToElicit, dialogState, intentName, and slots fields in the response. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • dialogState set to ElicitSlot

    • intentName set to the intent name in the current context

    • slotToElicit set to the slot name for which the message is eliciting information

    • slots set to a map of slots, configured for the intent, with currently known values

  • If the message is a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit is set to null.

  • If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the dialogState is set to ElicitIntent and slotToElicit is set to null.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

+ ///

Sends user input (text or SSML) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot.

In response, Amazon Lex returns the next message to convey to the user and an optional responseCard to display. Consider the following example messages:

  • For a user input "I would like a pizza", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?"

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation "Proceed with the pizza order?".

  • After the user replies to a confirmation prompt with a "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a "yes" or "no" user response. In addition to the message, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the slotToElicit, dialogState, intentName, and slots fields in the response. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • dialogState set to ElicitSlot

    • intentName set to the intent name in the current context

    • slotToElicit set to the slot name for which the message is eliciting information

    • slots set to a map of slots, configured for the intent, with currently known values

  • If the message is a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit is set to null.

  • If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the dialogState is set to ElicitIntent and slotToElicit is set to null.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

fn post_text(&self, input: PostTextRequest) -> RusotoFuture; + + ///

Creates a new session or modifies an existing session with an Amazon Lex bot. Use this operation to enable your application to set the state of the bot.

For more information, see Managing Sessions.

+    fn put_session(
+        &self,
+        input: PutSessionRequest,
+    ) -> RusotoFuture<PutSessionResponse, PutSessionError>;
 }
 /// A client for the Amazon Lex Runtime Service API.
 #[derive(Clone)]
@@ -392,10 +770,7 @@ impl LexRuntimeClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> LexRuntimeClient {
-        LexRuntimeClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -409,15 +784,91 @@ impl LexRuntimeClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        LexRuntimeClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> LexRuntimeClient {
+        LexRuntimeClient { client, region }
+    }
 }
 
 impl LexRuntime for LexRuntimeClient {
-    ///

Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot.

The PostContent operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications.

In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages:

  • For a user input "I would like a pizza," Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?".

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: "Order the pizza?".

  • After the user replies "Yes" to the confirmation prompt, Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the message, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • x-amz-lex-dialog-state header set to ElicitSlot

    • x-amz-lex-intent-name header set to the intent name in the current context

    • x-amz-lex-slot-to-elicit header set to the slot name for which the message is eliciting information

    • x-amz-lex-slots header set to a map of slots configured for the intent with their current values

  • If the message is a confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation and the x-amz-lex-slot-to-elicit header is omitted.

  • If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the x-amz-dialog-state header is set to ElicitIntent and the x-amz-slot-to-elicit header is omitted.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

+ ///

Removes session information for a specified bot, alias, and user ID.

+    fn delete_session(
+        &self,
+        input: DeleteSessionRequest,
+    ) -> RusotoFuture<DeleteSessionResponse, DeleteSessionError> {
+        let request_uri = format!(
+            "/bot/{bot_name}/alias/{bot_alias}/user/{user_id}/session",
+            bot_alias = input.bot_alias,
+            bot_name = input.bot_name,
+            user_id = input.user_id
+        );
+
+        let mut request = SignedRequest::new("DELETE", "lex", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("runtime.lex".to_string());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteSessionResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteSessionError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Returns session information for a specified bot, alias, and user ID.

+    fn get_session(
+        &self,
+        input: GetSessionRequest,
+    ) -> RusotoFuture<GetSessionResponse, GetSessionError> {
+        let request_uri = format!(
+            "/bot/{bot_name}/alias/{bot_alias}/user/{user_id}/session",
+            bot_alias = input.bot_alias,
+            bot_name = input.bot_name,
+            user_id = input.user_id
+        );
+
+        let mut request = SignedRequest::new("GET", "lex", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("runtime.lex".to_string());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetSessionResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetSessionError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot.

The PostContent operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications.

In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages:

  • For a user input "I would like a pizza," Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?".

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: "Order the pizza?".

  • After the user replies "Yes" to the confirmation prompt, Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the message, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • x-amz-lex-dialog-state header set to ElicitSlot

    • x-amz-lex-intent-name header set to the intent name in the current context

    • x-amz-lex-slot-to-elicit header set to the slot name for which the message is eliciting information

    • x-amz-lex-slots header set to a map of slots configured for the intent with their current values

  • If the message is a confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation and the x-amz-lex-slot-to-elicit header is omitted.

  • If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the x-amz-dialog-state header is set to ElicitIntent and the x-amz-slot-to-elicit header is omitted.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

     fn post_content(
         &self,
         input: PostContentRequest,
@@ -515,7 +966,7 @@ impl LexRuntime for LexRuntimeClient {
         })
     }
 
-    ///

Sends user input (text-only) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot.

In response, Amazon Lex returns the next message to convey to the user an optional responseCard to display. Consider the following example messages:

  • For a user input "I would like a pizza", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?"

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation "Proceed with the pizza order?".

  • After the user replies to a confirmation prompt with a "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a "yes" or "no" user response. In addition to the message, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the slotToElicit, dialogState, intentName, and slots fields in the response. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • dialogState set to ElicitSlot

    • intentName set to the intent name in the current context

    • slotToElicit set to the slot name for which the message is eliciting information

    • slots set to a map of slots, configured for the intent, with currently known values

  • If the message is a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit is set to null.

  • If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the dialogState is set to ElicitIntent and slotToElicit is set to null.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

+ ///

Sends user input (text or SSML) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot.

In response, Amazon Lex returns the next message to convey to the user and an optional responseCard to display. Consider the following example messages:

  • For a user input "I would like a pizza", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): "What size pizza would you like?"

  • After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation "Proceed with the pizza order?".

  • After the user replies to a confirmation prompt with a "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza has been ordered.".

Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a "yes" or "no" user response. In addition to the message, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the slotToElicit, dialogState, intentName, and slots fields in the response. Consider the following examples:

  • If the message is to elicit slot data, Amazon Lex returns the following context information:

    • dialogState set to ElicitSlot

    • intentName set to the intent name in the current context

    • slotToElicit set to the slot name for which the message is eliciting information

    • slots set to a map of slots, configured for the intent, with currently known values

  • If the message is a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit is set to null.

  • If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the dialogState is set to ElicitIntent and slotToElicit is set to null.

In addition, Amazon Lex also returns your application-specific sessionAttributes. For more information, see Managing Conversation Context.

fn post_text(&self, input: PostTextRequest) -> RusotoFuture { let request_uri = format!( "/bot/{bot_name}/alias/{bot_alias}/user/{user_id}/text", @@ -549,4 +1000,85 @@ impl LexRuntime for LexRuntimeClient { } }) } + + ///

Creates a new session or modifies an existing session with an Amazon Lex bot. Use this operation to enable your application to set the state of the bot.

For more information, see Managing Sessions.

+    fn put_session(
+        &self,
+        input: PutSessionRequest,
+    ) -> RusotoFuture<PutSessionResponse, PutSessionError> {
+        let request_uri = format!(
+            "/bot/{bot_name}/alias/{bot_alias}/user/{user_id}/session",
+            bot_alias = input.bot_alias,
+            bot_name = input.bot_name,
+            user_id = input.user_id
+        );
+
+        let mut request = SignedRequest::new("POST", "lex", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        request.set_endpoint_prefix("runtime.lex".to_string());
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        if let Some(ref accept) = input.accept {
+            request.add_header("Accept", &accept.to_string());
+        }
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let mut result = PutSessionResponse::default();
+                    result.audio_stream = Some(response.body);
+
+                    if let Some(content_type) = response.headers.get("Content-Type") {
+                        let value = content_type.to_owned();
+                        result.content_type = Some(value)
+                    };
+                    if let Some(dialog_state) = response.headers.get("x-amz-lex-dialog-state") {
+                        let value = dialog_state.to_owned();
+                        result.dialog_state = Some(value)
+                    };
+                    if let Some(intent_name) = response.headers.get("x-amz-lex-intent-name") {
+                        let value = intent_name.to_owned();
+                        result.intent_name = Some(value)
+                    };
+                    if let Some(message) = response.headers.get("x-amz-lex-message") {
+                        let value = message.to_owned();
+                        result.message = Some(value)
+                    };
+                    if let Some(message_format) = response.headers.get("x-amz-lex-message-format") {
+                        let value = message_format.to_owned();
+                        result.message_format = Some(value)
+                    };
+                    if let Some(session_attributes) =
+                        response.headers.get("x-amz-lex-session-attributes")
+                    {
+                        let value = session_attributes.to_owned();
+                        result.session_attributes = Some(value)
+                    };
+                    if let Some(session_id) = response.headers.get("x-amz-lex-session-id") {
+                        let value = session_id.to_owned();
+                        result.session_id = Some(value)
+                    };
+                    if let Some(slot_to_elicit) = response.headers.get("x-amz-lex-slot-to-elicit") {
+                        let value = slot_to_elicit.to_owned();
+                        result.slot_to_elicit = Some(value)
+                    };
+                    if let Some(slots) = response.headers.get("x-amz-lex-slots") {
+                        let value = slots.to_owned();
+                        result.slots = Some(value)
+                    };
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(PutSessionError::from_response(response))),
+                )
+            }
+        })
+    }
 }
diff --git a/rusoto/services/license-manager/Cargo.toml b/rusoto/services/license-manager/Cargo.toml
index db876fa8a54..afb95438931 100644
--- a/rusoto/services/license-manager/Cargo.toml
+++ b/rusoto/services/license-manager/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_license_manager"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@
 serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/license-manager/README.md b/rusoto/services/license-manager/README.md
index e9f25000eea..c4eb2824578 100644
--- a/rusoto/services/license-manager/README.md
+++ b/rusoto/services/license-manager/README.md
@@ -23,9 +23,16 @@ To use `rusoto_license_manager` in your application, add it as a dependency in y
 
 ```toml
 [dependencies]
-rusoto_license_manager = "0.40.0"
+rusoto_license_manager = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/license-manager/src/custom/mod.rs b/rusoto/services/license-manager/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/license-manager/src/custom/mod.rs
+++ b/rusoto/services/license-manager/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/license-manager/src/generated.rs b/rusoto/services/license-manager/src/generated.rs
index f5f2719e4cb..785d7ea067c 100644
--- a/rusoto/services/license-manager/src/generated.rs
+++ b/rusoto/services/license-manager/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 ///

Details about license consumption.
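Before the struct listings resume, a note on the recurring change through the rest of this file: structs that previously derived `Serialize` only under `cfg(test)` now also do so when the new `serialize_structs` feature is enabled, mirroring the Lex client above. A hedged sketch of what that buys a consumer (crate and struct names as in this diff; fields elided via `Default`):

```rust
// Cargo.toml (consumer side):
// rusoto_license_manager = { version = "0.41.0", features = ["serialize_structs"] }

use rusoto_license_manager::ConsumedLicenseSummary;

fn main() {
    let summary = ConsumedLicenseSummary::default();
    // With `serialize_structs` enabled, response structs can be re-serialized,
    // e.g. for logging or caching; without the feature this would not compile
    // outside of tests.
    println!("{}", serde_json::to_string(&summary).unwrap());
}
```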

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ConsumedLicenseSummary {
     ///

Number of licenses consumed by a resource.

#[serde(rename = "ConsumedLicenses")] @@ -69,7 +68,7 @@ pub struct CreateLicenseConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLicenseConfigurationResponse { ///

ARN of the license configuration object after its creation.

#[serde(rename = "LicenseConfigurationArn")] @@ -85,7 +84,7 @@ pub struct DeleteLicenseConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLicenseConfigurationResponse {} ///

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a Describe operation are documented with the Describe operation.

@@ -109,7 +108,7 @@ pub struct GetLicenseConfigurationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetLicenseConfigurationResponse {
     ///

List of summaries for consumed licenses used by various resources.

#[serde(rename = "ConsumedLicenseSummaryList")] @@ -173,7 +172,7 @@ pub struct GetLicenseConfigurationResponse { pub struct GetServiceSettingsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetServiceSettingsResponse { ///

Indicates whether cross-account discovery has been enabled.

#[serde(rename = "EnableCrossAccountsDiscovery")] @@ -210,7 +209,7 @@ pub struct InventoryFilter { ///

A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. Components include specifications for the license type (licensing by instance, socket, CPU, or VCPU), tenancy (shared tenancy, Amazon EC2 Dedicated Instance, Amazon EC2 Dedicated Host, or any of these), host affinity (how long a VM must be associated with a host), the number of licenses purchased and used.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct LicenseConfiguration {
     ///

List of summaries for licenses consumed by various resources.

#[serde(rename = "ConsumedLicenseSummaryList")] @@ -268,7 +267,7 @@ pub struct LicenseConfiguration { ///

Describes a server resource that is associated with a license configuration.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct LicenseConfigurationAssociation {
     ///

Time when the license configuration was associated with the resource.

#[serde(rename = "AssociationTime")] @@ -290,7 +289,7 @@ pub struct LicenseConfigurationAssociation { ///

Contains details of the usage of each resource from the license pool.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct LicenseConfigurationUsage {
     ///
<p>Time when the license configuration was initially associated with a resource.</p>
#[serde(rename = "AssociationTime")] @@ -342,7 +341,7 @@ pub struct ListAssociationsForLicenseConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssociationsForLicenseConfigurationResponse { ///
<p>Lists association objects for the license configuration, each containing the association time, number of consumed licenses, resource ARN, resource ID, account ID that owns the resource, resource size, and resource type.</p>
#[serde(rename = "LicenseConfigurationAssociations")] @@ -375,7 +374,7 @@ pub struct ListLicenseConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLicenseConfigurationsResponse { ///
<p>Array of license configuration objects.</p>
#[serde(rename = "LicenseConfigurations")] @@ -403,7 +402,7 @@ pub struct ListLicenseSpecificationsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLicenseSpecificationsForResourceResponse { ///
<p>License configurations associated with a resource.</p>
#[serde(rename = "LicenseSpecifications")] @@ -432,7 +431,7 @@ pub struct ListResourceInventoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceInventoryResponse { ///
<p>Token for the next set of results.</p>
#[serde(rename = "NextToken")] @@ -452,7 +451,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///
<p>List of tags attached to the resource.</p>
#[serde(rename = "Tags")] @@ -480,7 +479,7 @@ pub struct ListUsageForLicenseConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsageForLicenseConfigurationResponse { ///
<p>An array of LicenseConfigurationUsage objects.</p>
#[serde(rename = "LicenseConfigurationUsageList")] @@ -494,7 +493,7 @@ pub struct ListUsageForLicenseConfigurationResponse { ///
<p>Summary for a resource.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ManagedResourceSummary { ///
<p>Number of resources associated with licenses.</p>
#[serde(rename = "AssociationCount")] @@ -516,7 +515,7 @@ pub struct OrganizationConfiguration { ///
<p>A set of attributes that describe a resource.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceInventory { ///
<p>The platform of the resource.</p>
#[serde(rename = "Platform")] @@ -568,7 +567,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -582,7 +581,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -617,7 +616,7 @@ pub struct UpdateLicenseConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateLicenseConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -636,7 +635,7 @@ pub struct UpdateLicenseSpecificationsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateLicenseSpecificationsForResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -660,7 +659,7 @@ pub struct UpdateServiceSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServiceSettingsResponse {} /// Errors returned by CreateLicenseConfiguration @@ -1812,10 +1811,7 @@ impl LicenseManagerClient { /// /// The client will use the default credentials provider and tls client. 
pub fn new(region: region::Region) -> LicenseManagerClient { - LicenseManagerClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1829,10 +1825,14 @@ impl LicenseManagerClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - LicenseManagerClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> LicenseManagerClient { + LicenseManagerClient { client, region } } } diff --git a/rusoto/services/lightsail/Cargo.toml b/rusoto/services/lightsail/Cargo.toml index b6c4b239157..d792b11bfa0 100644 --- a/rusoto/services/lightsail/Cargo.toml +++ b/rusoto/services/lightsail/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_lightsail" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/lightsail/README.md b/rusoto/services/lightsail/README.md index 39c24f2eaed..aadaf65a09f 100644 --- a/rusoto/services/lightsail/README.md +++ b/rusoto/services/lightsail/README.md @@ -23,9 +23,16 @@ To use `rusoto_lightsail` in your application, add it as a dependency in your `C ```toml [dependencies] -rusoto_lightsail = "0.40.0" +rusoto_lightsail = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/lightsail/src/custom/mod.rs b/rusoto/services/lightsail/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/lightsail/src/custom/mod.rs +++ b/rusoto/services/lightsail/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/lightsail/src/generated.rs b/rusoto/services/lightsail/src/generated.rs index 122ac869d11..f9076eeded8 100644 --- a/rusoto/services/lightsail/src/generated.rs +++ b/rusoto/services/lightsail/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct AllocateStaticIpRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AllocateStaticIpResult { ///
<p>An array of key-value pairs containing information about the static IP address you allocated.</p>
#[serde(rename = "operations")] @@ -54,7 +53,7 @@ pub struct AttachDiskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachDiskResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -73,7 +72,7 @@ pub struct AttachInstancesToLoadBalancerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachInstancesToLoadBalancerResult { ///
<p>An object representing the API operations.</p>
#[serde(rename = "operations")] @@ -92,7 +91,7 @@ pub struct AttachLoadBalancerTlsCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachLoadBalancerTlsCertificateResult { ///
<p>An object representing the API operations.</p> <p>These SSL/TLS certificates are only usable by Lightsail load balancers. You can't get the certificate and use it for another purpose.</p>
#[serde(rename = "operations")] @@ -111,7 +110,7 @@ pub struct AttachStaticIpRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachStaticIpResult { ///
<p>An array of key-value pairs containing information about your API operations.</p>
#[serde(rename = "operations")] @@ -121,7 +120,7 @@ pub struct AttachStaticIpResult { ///
<p>Describes an Availability Zone.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AvailabilityZone { ///
<p>The state of the Availability Zone.</p>
#[serde(rename = "state")] @@ -135,7 +134,7 @@ pub struct AvailabilityZone { ///
<p>Describes a blueprint (a virtual private server image).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Blueprint { ///
<p>The ID for the virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0).</p>
#[serde(rename = "blueprintId")] @@ -189,7 +188,7 @@ pub struct Blueprint { ///
<p>Describes a bundle, which is a set of specs describing your virtual private server (or instance).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Bundle { ///
<p>The bundle ID (e.g., micro_1_0).</p>
#[serde(rename = "bundleId")] @@ -248,7 +247,7 @@ pub struct CloseInstancePublicPortsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloseInstancePublicPortsResult { ///
<p>An array of key-value pairs that contains information about the operation.</p>
#[serde(rename = "operation")] @@ -258,7 +257,7 @@ pub struct CloseInstancePublicPortsResult { ///
<p>Describes a CloudFormation stack record created as a result of the create cloud formation stack operation.</p> <p>A CloudFormation stack record provides information about the AWS CloudFormation stack used to create a new Amazon Elastic Compute Cloud instance from an exported Lightsail instance snapshot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloudFormationStackRecord { ///
<p>The Amazon Resource Name (ARN) of the CloudFormation stack record.</p>
#[serde(rename = "arn")] @@ -296,7 +295,7 @@ pub struct CloudFormationStackRecord { ///
<p>Describes the source of a CloudFormation stack record (i.e., the export snapshot record).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloudFormationStackRecordSourceInfo { ///
<p>The Amazon Resource Name (ARN) of the export snapshot record.</p>
#[serde(rename = "arn")] @@ -326,7 +325,7 @@ pub struct CopySnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CopySnapshotResult { ///
<p>A list of objects describing the API operation.</p>
#[serde(rename = "operations")] @@ -342,7 +341,7 @@ pub struct CreateCloudFormationStackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCloudFormationStackResult { ///
<p>A list of objects describing the API operation.</p>
#[serde(rename = "operations")] @@ -371,7 +370,7 @@ pub struct CreateDiskFromSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDiskFromSnapshotResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -397,7 +396,7 @@ pub struct CreateDiskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDiskResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -425,7 +424,7 @@ pub struct CreateDiskSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDiskSnapshotResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -444,7 +443,7 @@ pub struct CreateDomainEntryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDomainEntryResult { ///
<p>An array of key-value pairs containing information about the operation.</p>
#[serde(rename = "operation")] @@ -464,7 +463,7 @@ pub struct CreateDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDomainResult { ///
<p>An array of key-value pairs containing information about the domain resource you created.</p>
#[serde(rename = "operation")] @@ -487,7 +486,7 @@ pub struct CreateInstanceSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInstanceSnapshotResult { ///
<p>An array of key-value pairs containing information about the results of your create instances snapshot request.</p>
#[serde(rename = "operations")] @@ -528,7 +527,7 @@ pub struct CreateInstancesFromSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInstancesFromSnapshotResult { ///
<p>An array of key-value pairs containing information about the results of your create instances from snapshot request.</p>
#[serde(rename = "operations")] @@ -565,7 +564,7 @@ pub struct CreateInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInstancesResult { ///
<p>An array of key-value pairs containing information about the results of your create instances request.</p>
#[serde(rename = "operations")] @@ -585,7 +584,7 @@ pub struct CreateKeyPairRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateKeyPairResult { ///
<p>An array of key-value pairs containing information about the new key pair you just created.</p>
#[serde(rename = "keyPair")] @@ -636,7 +635,7 @@ pub struct CreateLoadBalancerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLoadBalancerResult { ///
<p>An object containing information about the API operations.</p>
#[serde(rename = "operations")] @@ -666,7 +665,7 @@ pub struct CreateLoadBalancerTlsCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLoadBalancerTlsCertificateResult { ///
<p>An object containing information about the API operations.</p>
#[serde(rename = "operations")] @@ -714,7 +713,7 @@ pub struct CreateRelationalDatabaseFromSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRelationalDatabaseFromSnapshotResult { ///
<p>An object describing the result of your create relational database from snapshot request.</p>
#[serde(rename = "operations")] @@ -766,7 +765,7 @@ pub struct CreateRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRelationalDatabaseResult { ///
<p>An object describing the result of your create relational database request.</p>
#[serde(rename = "operations")] @@ -789,7 +788,7 @@ pub struct CreateRelationalDatabaseSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRelationalDatabaseSnapshotResult { ///
<p>An object describing the result of your create relational database snapshot request.</p>
#[serde(rename = "operations")] @@ -805,7 +804,7 @@ pub struct DeleteDiskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDiskResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -821,7 +820,7 @@ pub struct DeleteDiskSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDiskSnapshotResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -840,7 +839,7 @@ pub struct DeleteDomainEntryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDomainEntryResult { ///
<p>An array of key-value pairs containing information about the results of your delete domain entry request.</p>
#[serde(rename = "operation")] @@ -856,7 +855,7 @@ pub struct DeleteDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDomainResult { ///
<p>An array of key-value pairs containing information about the results of your delete domain request.</p>
#[serde(rename = "operation")] @@ -872,7 +871,7 @@ pub struct DeleteInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInstanceResult { ///
<p>An array of key-value pairs containing information about the results of your delete instance request.</p>
#[serde(rename = "operations")] @@ -888,7 +887,7 @@ pub struct DeleteInstanceSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInstanceSnapshotResult { ///
<p>An array of key-value pairs containing information about the results of your delete instance snapshot request.</p>
#[serde(rename = "operations")] @@ -904,7 +903,7 @@ pub struct DeleteKeyPairRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteKeyPairResult { ///
<p>An array of key-value pairs containing information about the results of your delete key pair request.</p>
#[serde(rename = "operation")] @@ -920,7 +919,7 @@ pub struct DeleteKnownHostKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteKnownHostKeysResult { ///
<p>A list of objects describing the API operation.</p>
#[serde(rename = "operations")] @@ -936,7 +935,7 @@ pub struct DeleteLoadBalancerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLoadBalancerResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -959,7 +958,7 @@ pub struct DeleteLoadBalancerTlsCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLoadBalancerTlsCertificateResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -983,7 +982,7 @@ pub struct DeleteRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRelationalDatabaseResult { ///
<p>An object describing the result of your delete relational database request.</p>
#[serde(rename = "operations")] @@ -999,7 +998,7 @@ pub struct DeleteRelationalDatabaseSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRelationalDatabaseSnapshotResult { ///
<p>An object describing the result of your delete relational database snapshot request.</p>
#[serde(rename = "operations")] @@ -1009,7 +1008,7 @@ pub struct DeleteRelationalDatabaseSnapshotResult { ///
<p>Describes the destination of a record.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DestinationInfo { ///
<p>The ID of the resource created at the destination.</p>
#[serde(rename = "id")] @@ -1029,7 +1028,7 @@ pub struct DetachDiskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachDiskResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -1048,7 +1047,7 @@ pub struct DetachInstancesFromLoadBalancerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachInstancesFromLoadBalancerResult { ///
<p>An object describing the API operations.</p>
#[serde(rename = "operations")] @@ -1064,7 +1063,7 @@ pub struct DetachStaticIpRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachStaticIpResult { ///
<p>An array of key-value pairs containing information about the results of your detach static IP request.</p>
#[serde(rename = "operations")] @@ -1074,7 +1073,7 @@ pub struct DetachStaticIpResult { ///
<p>Describes a system disk or a block storage disk.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Disk { ///
<p>The Amazon Resource Name (ARN) of the disk.</p>
#[serde(rename = "arn")] @@ -1136,7 +1135,7 @@ pub struct Disk { ///
<p>Describes a disk.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiskInfo { ///
<p>A Boolean value indicating whether this disk is a system disk (has an operating system loaded on it).</p>
#[serde(rename = "isSystemDisk")] @@ -1171,7 +1170,7 @@ pub struct DiskMap { ///
<p>Describes a block storage disk snapshot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiskSnapshot { ///
<p>The Amazon Resource Name (ARN) of the disk snapshot.</p>
#[serde(rename = "arn")] @@ -1233,7 +1232,7 @@ pub struct DiskSnapshot { ///
<p>Describes a disk snapshot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiskSnapshotInfo { ///
<p>The size of the disk in GB (e.g., 32).</p>
#[serde(rename = "sizeInGb")] @@ -1243,7 +1242,7 @@ pub struct DiskSnapshotInfo { ///
<p>Describes a domain where you are storing recordsets in Lightsail.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Domain { ///
<p>The Amazon Resource Name (ARN) of the domain recordset (e.g., arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE).</p>
#[serde(rename = "arn")] @@ -1308,7 +1307,7 @@ pub struct DomainEntry { pub struct DownloadDefaultKeyPairRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DownloadDefaultKeyPairResult { ///
<p>A base64-encoded RSA private key.</p>
#[serde(rename = "privateKeyBase64")] @@ -1322,7 +1321,7 @@ pub struct DownloadDefaultKeyPairResult { ///
<p>Describes an export snapshot record.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportSnapshotRecord { ///
<p>The Amazon Resource Name (ARN) of the export snapshot record.</p>
#[serde(rename = "arn")] @@ -1360,7 +1359,7 @@ pub struct ExportSnapshotRecord { ///
<p>Describes the source of an export snapshot record.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportSnapshotRecordSourceInfo { ///
<p>The Amazon Resource Name (ARN) of the source instance or disk snapshot.</p>
#[serde(rename = "arn")] @@ -1404,7 +1403,7 @@ pub struct ExportSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportSnapshotResult { ///
<p>A list of objects describing the API operation.</p>
#[serde(rename = "operations")] @@ -1421,7 +1420,7 @@ pub struct GetActiveNamesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetActiveNamesResult { ///
<p>The list of active names returned by the get active names request.</p>
#[serde(rename = "activeNames")] @@ -1446,7 +1445,7 @@ pub struct GetBlueprintsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBlueprintsResult { ///
<p>An array of key-value pairs that contains information about the available blueprints.</p>
#[serde(rename = "blueprints")] @@ -1471,7 +1470,7 @@ pub struct GetBundlesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBundlesResult { ///
<p>An array of key-value pairs that contains information about the available bundles.</p>
#[serde(rename = "bundles")] @@ -1492,7 +1491,7 @@ pub struct GetCloudFormationStackRecordsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCloudFormationStackRecordsResult { ///
<p>A list of objects describing the CloudFormation stack records.</p>
#[serde(rename = "cloudFormationStackRecords")] @@ -1512,7 +1511,7 @@ pub struct GetDiskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDiskResult { ///
<p>An object containing information about the disk.</p>
#[serde(rename = "disk")] @@ -1528,7 +1527,7 @@ pub struct GetDiskSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDiskSnapshotResult { ///
<p>An object containing information about the disk snapshot.</p>
#[serde(rename = "diskSnapshot")] @@ -1545,7 +1544,7 @@ pub struct GetDiskSnapshotsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDiskSnapshotsResult { ///
<p>An array of objects containing information about all block storage disk snapshots.</p>
#[serde(rename = "diskSnapshots")] @@ -1566,7 +1565,7 @@ pub struct GetDisksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDisksResult { ///
<p>An array of objects containing information about all block storage disks.</p>
#[serde(rename = "disks")] @@ -1586,7 +1585,7 @@ pub struct GetDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDomainResult { ///
<p>An array of key-value pairs containing information about your get domain request.</p>
#[serde(rename = "domain")] @@ -1603,7 +1602,7 @@ pub struct GetDomainsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDomainsResult { ///
<p>An array of key-value pairs containing information about each of the domain entries in the user's account.</p>
#[serde(rename = "domains")] @@ -1624,7 +1623,7 @@ pub struct GetExportSnapshotRecordsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetExportSnapshotRecordsResult { ///
<p>A list of objects describing the export snapshot records.</p>
#[serde(rename = "exportSnapshotRecords")] @@ -1648,7 +1647,7 @@ pub struct GetInstanceAccessDetailsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceAccessDetailsResult { ///
<p>An array of key-value pairs containing information about a get instance access request.</p>
#[serde(rename = "accessDetails")] @@ -1682,7 +1681,7 @@ pub struct GetInstanceMetricDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceMetricDataResult { ///
<p>An array of key-value pairs containing information about the results of your get instance metric data request.</p>
#[serde(rename = "metricData")] @@ -1702,7 +1701,7 @@ pub struct GetInstancePortStatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstancePortStatesResult { ///
<p>Information about the port states resulting from your request.</p>
#[serde(rename = "portStates")] @@ -1718,7 +1717,7 @@ pub struct GetInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceResult { ///
<p>An array of key-value pairs containing information about the specified instance.</p>
#[serde(rename = "instance")] @@ -1734,7 +1733,7 @@ pub struct GetInstanceSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceSnapshotResult { ///
<p>An array of key-value pairs containing information about the results of your get instance snapshot request.</p>
#[serde(rename = "instanceSnapshot")] @@ -1751,7 +1750,7 @@ pub struct GetInstanceSnapshotsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceSnapshotsResult { ///
<p>An array of key-value pairs containing information about the results of your get instance snapshots request.</p>
#[serde(rename = "instanceSnapshots")] @@ -1771,7 +1770,7 @@ pub struct GetInstanceStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceStateResult { ///
<p>The state of the instance.</p>
#[serde(rename = "state")] @@ -1788,7 +1787,7 @@ pub struct GetInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstancesResult { ///
<p>An array of key-value pairs containing information about your instances.</p>
#[serde(rename = "instances")] @@ -1808,7 +1807,7 @@ pub struct GetKeyPairRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetKeyPairResult { ///
<p>An array of key-value pairs containing information about the key pair.</p>
#[serde(rename = "keyPair")] @@ -1825,7 +1824,7 @@ pub struct GetKeyPairsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetKeyPairsResult { ///
<p>An array of key-value pairs containing information about the key pairs.</p>
#[serde(rename = "keyPairs")] @@ -1863,7 +1862,7 @@ pub struct GetLoadBalancerMetricDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoadBalancerMetricDataResult { ///
<p>An array of metric datapoint objects.</p>
#[serde(rename = "metricData")] @@ -1883,7 +1882,7 @@ pub struct GetLoadBalancerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoadBalancerResult { ///
<p>An object containing information about your load balancer.</p>
#[serde(rename = "loadBalancer")] @@ -1899,7 +1898,7 @@ pub struct GetLoadBalancerTlsCertificatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoadBalancerTlsCertificatesResult { ///
<p>An array of LoadBalancerTlsCertificate objects describing your SSL/TLS certificates.</p>
#[serde(rename = "tlsCertificates")] @@ -1916,7 +1915,7 @@ pub struct GetLoadBalancersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoadBalancersResult { ///
<p>An array of LoadBalancer objects describing your load balancers.</p>
#[serde(rename = "loadBalancers")] @@ -1936,7 +1935,7 @@ pub struct GetOperationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOperationResult { ///
<p>An array of key-value pairs containing information about the results of your get operation request.</p>
#[serde(rename = "operation")] @@ -1956,7 +1955,7 @@ pub struct GetOperationsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOperationsForResourceResult { ///
<p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p>
#[serde(rename = "nextPageToken")] @@ -1977,7 +1976,7 @@ pub struct GetOperationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOperationsResult { ///
<p>A token used for advancing to the next page of results from your get operations request.</p>
#[serde(rename = "nextPageToken")] @@ -2002,7 +2001,7 @@ pub struct GetRegionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegionsResult { ///
<p>An array of key-value pairs containing information about your get regions request.</p>
#[serde(rename = "regions")] @@ -2019,7 +2018,7 @@ pub struct GetRelationalDatabaseBlueprintsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseBlueprintsResult { ///
<p>An object describing the result of your get relational database blueprints request.</p>
#[serde(rename = "blueprints")] @@ -2040,7 +2039,7 @@ pub struct GetRelationalDatabaseBundlesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseBundlesResult { ///
<p>An object describing the result of your get relational database bundles request.</p>
#[serde(rename = "bundles")] @@ -2068,7 +2067,7 @@ pub struct GetRelationalDatabaseEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseEventsResult { ///
<p>A token used for advancing to the next page of results from your get relational database events request.</p>
#[serde(rename = "nextPageToken")] @@ -2107,7 +2106,7 @@ pub struct GetRelationalDatabaseLogEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseLogEventsResult { ///
<p>A token used for advancing to the previous page of results from your get relational database log events request.</p>
#[serde(rename = "nextBackwardToken")] @@ -2131,7 +2130,7 @@ pub struct GetRelationalDatabaseLogStreamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseLogStreamsResult { ///
<p>An object describing the result of your get relational database log streams request.</p>
#[serde(rename = "logStreams")] @@ -2151,7 +2150,7 @@ pub struct GetRelationalDatabaseMasterUserPasswordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseMasterUserPasswordResult { ///
<p>The timestamp when the specified version of the master user password was created.</p>
#[serde(rename = "createdAt")] @@ -2189,7 +2188,7 @@ pub struct GetRelationalDatabaseMetricDataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseMetricDataResult { ///
<p>An object describing the result of your get relational database metric data request.</p>
#[serde(rename = "metricData")] @@ -2213,7 +2212,7 @@ pub struct GetRelationalDatabaseParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseParametersResult { ///
<p>A token used for advancing to the next page of results from your get static IPs request.</p>
#[serde(rename = "nextPageToken")] @@ -2233,7 +2232,7 @@ pub struct GetRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseResult { ///
<p>An object describing the specified database.</p>
#[serde(rename = "relationalDatabase")] @@ -2249,7 +2248,7 @@ pub struct GetRelationalDatabaseSnapshotRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseSnapshotResult { ///
<p>An object describing the specified database snapshot.</p>
#[serde(rename = "relationalDatabaseSnapshot")] @@ -2266,7 +2265,7 @@ pub struct GetRelationalDatabaseSnapshotsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabaseSnapshotsResult { ///
<p>A token used for advancing to the next page of results from your get relational database snapshots request.</p>
#[serde(rename = "nextPageToken")] @@ -2287,7 +2286,7 @@ pub struct GetRelationalDatabasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRelationalDatabasesResult { ///
<p>A token used for advancing to the next page of results from your get relational databases request.</p>
#[serde(rename = "nextPageToken")] @@ -2307,7 +2306,7 @@ pub struct GetStaticIpRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetStaticIpResult { ///
<p>An array of key-value pairs containing information about the requested static IP.</p>
#[serde(rename = "staticIp")] @@ -2324,7 +2323,7 @@ pub struct GetStaticIpsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetStaticIpsResult { ///
<p>A token used for advancing to the next page of results from your get static IPs request.</p>
#[serde(rename = "nextPageToken")] @@ -2338,7 +2337,7 @@ pub struct GetStaticIpsResult { ///
<p>Describes the public SSH host keys or the RDP certificate.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HostKeyAttributes { ///
<p>The SSH host key algorithm or the RDP certificate format.</p> <p>For SSH host keys, the algorithm may be ssh-rsa, ecdsa-sha2-nistp256, ssh-ed25519, etc. For RDP certificates, the algorithm is always x509-cert.</p>
#[serde(rename = "algorithm")] @@ -2381,7 +2380,7 @@ pub struct ImportKeyPairRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportKeyPairResult { ///
<p>An array of key-value pairs containing information about the request operation.</p>
#[serde(rename = "operation")] @@ -2391,7 +2390,7 @@ pub struct ImportKeyPairResult { ///
<p>Describes an instance (a virtual private server).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Instance { ///
<p>The Amazon Resource Name (ARN) of the instance (e.g., arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE).</p>
#[serde(rename = "arn")] @@ -2473,7 +2472,7 @@ pub struct Instance { ///
<p>The parameters for gaining temporary access to one of your Amazon Lightsail instances.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceAccessDetails { ///
<p>For SSH access, the public key to use when accessing your instance. For OpenSSH clients (e.g., command line SSH), you should save this value to tempkey-cert.pub.</p>
#[serde(rename = "certKey")] @@ -2540,7 +2539,7 @@ pub struct InstanceEntry { ///
<p>Describes the hardware for the instance.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceHardware { ///
<p>The number of vCPUs the instance has.</p>
#[serde(rename = "cpuCount")] @@ -2558,7 +2557,7 @@ pub struct InstanceHardware { ///
<p>Describes information about the health of the instance.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceHealthSummary { ///
<p>Describes the overall instance health. Valid values are below.</p>
#[serde(rename = "instanceHealth")] @@ -2576,7 +2575,7 @@ pub struct InstanceHealthSummary { ///
<p>Describes monthly data transfer rates and port information for an instance.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceNetworking { ///
<p>The amount of data in GB allocated for monthly data transfers.</p>
#[serde(rename = "monthlyTransfer")] @@ -2590,7 +2589,7 @@ pub struct InstanceNetworking { ///
<p>Describes information about the instance ports.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstancePortInfo { ///
<p>The access direction (inbound or outbound).</p>
#[serde(rename = "accessDirection")] @@ -2624,7 +2623,7 @@ pub struct InstancePortInfo { ///
<p>Describes the port state.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstancePortState { ///
<p>The first port in the range.</p>
#[serde(rename = "fromPort")] @@ -2646,7 +2645,7 @@ pub struct InstancePortState { ///
<p>Describes the snapshot of the virtual private server, or instance.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceSnapshot { ///
<p>The Amazon Resource Name (ARN) of the snapshot (e.g., arn:aws:lightsail:us-east-2:123456789101:InstanceSnapshot/d23b5706-3322-4d83-81e5-12345EXAMPLE).</p>
#[serde(rename = "arn")] @@ -2712,7 +2711,7 @@ pub struct InstanceSnapshot { ///
<p>Describes an instance snapshot.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceSnapshotInfo { ///
<p>The blueprint ID from which the source instance was created (e.g., os_debian_8_3).</p>
#[serde(rename = "fromBlueprintId")] @@ -2730,7 +2729,7 @@ pub struct InstanceSnapshotInfo { ///
<p>Describes the virtual private server (or instance) status.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceState { ///
<p>The status code for the instance.</p>
#[serde(rename = "code")] @@ -2746,7 +2745,7 @@ pub struct InstanceState { pub struct IsVpcPeeredRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IsVpcPeeredResult { ///
<p>Returns true if the Lightsail VPC is peered; otherwise, false.</p>
#[serde(rename = "isPeered")] @@ -2756,7 +2755,7 @@ pub struct IsVpcPeeredResult { ///
<p>Describes the SSH key pair.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct KeyPair { ///
<p>The Amazon Resource Name (ARN) of the key pair (e.g., arn:aws:lightsail:us-east-2:123456789101:KeyPair/05859e3d-331d-48ba-9034-12345EXAMPLE).</p>
#[serde(rename = "arn")] @@ -2794,7 +2793,7 @@ pub struct KeyPair { ///
<p>Describes the Lightsail load balancer.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancer { ///
<p>The Amazon Resource Name (ARN) of the load balancer.</p>
#[serde(rename = "arn")] @@ -2864,7 +2863,7 @@ pub struct LoadBalancer { ///
<p>Describes a load balancer SSL/TLS certificate.</p> <p>TLS is just an updated, more secure version of Secure Socket Layer (SSL).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancerTlsCertificate { ///
<p>The Amazon Resource Name (ARN) of the SSL/TLS certificate.</p>
#[serde(rename = "arn")] @@ -2970,7 +2969,7 @@ pub struct LoadBalancerTlsCertificate { ///
<p>Contains information about the domain names on an SSL/TLS certificate that you will use to validate domain ownership.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancerTlsCertificateDomainValidationOption { ///
<p>The fully qualified domain name in the certificate request.</p>
#[serde(rename = "domainName")] @@ -2984,7 +2983,7 @@ pub struct LoadBalancerTlsCertificateDomainValidationOption { ///
<p>Describes the validation record of each domain name in the SSL/TLS certificate.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancerTlsCertificateDomainValidationRecord { ///
<p>The domain name against which your SSL/TLS certificate was validated.</p>
#[serde(rename = "domainName")] @@ -3010,7 +3009,7 @@ pub struct LoadBalancerTlsCertificateDomainValidationRecord { ///
<p>Contains information about the status of Lightsail's managed renewal for the certificate.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancerTlsCertificateRenewalSummary { ///
<p>Contains information about the validation of each domain name in the certificate, as it pertains to Lightsail's managed renewal. This is different from the initial validation that occurs as a result of the RequestCertificate request.</p>
#[serde(rename = "domainValidationOptions")] @@ -3024,7 +3023,7 @@ pub struct LoadBalancerTlsCertificateRenewalSummary { ///
<p>Provides a summary of SSL/TLS certificate metadata.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LoadBalancerTlsCertificateSummary { ///
<p>When true, the SSL/TLS certificate is attached to the Lightsail load balancer.</p>
#[serde(rename = "isAttached")] @@ -3038,7 +3037,7 @@ pub struct LoadBalancerTlsCertificateSummary { ///
<p>Describes a database log event.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogEvent { ///
<p>The timestamp when the database log event was created.</p>
#[serde(rename = "createdAt")] @@ -3052,7 +3051,7 @@ pub struct LogEvent { ///
<p>Describes the metric data point.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricDatapoint { ///
<p>The average.</p>
#[serde(rename = "average")] @@ -3086,7 +3085,7 @@ pub struct MetricDatapoint { ///
<p>Describes the monthly data transfer in and out of your virtual private server (or instance).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MonthlyTransfer { ///
<p>The amount allocated per month (in GB).</p>
#[serde(rename = "gbPerMonthAllocated")] @@ -3105,7 +3104,7 @@ pub struct OpenInstancePublicPortsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OpenInstancePublicPortsResult { ///
<p>An array of key-value pairs containing information about the request operation.</p>
#[serde(rename = "operation")] @@ -3115,7 +3114,7 @@ pub struct OpenInstancePublicPortsResult { ///
<p>Describes the API operation.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Operation { ///
<p>The timestamp when the operation was initialized (e.g., 1479816991.349).</p>
#[serde(rename = "createdAt")] @@ -3169,7 +3168,7 @@ pub struct Operation { ///

The password data for the Windows Server-based instance, including the ciphertext and the key pair name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PasswordData { ///

The encrypted password. Ciphertext will be an empty string if access to your new instance is not ready yet. When you create an instance, it can take up to 15 minutes for the instance to be ready.

If you use the default key pair (LightsailDefaultKeyPair), the decrypted password will be available in the password field.

If you are using a custom key pair, you need to use your own means of decryption.

If you change the Administrator password on the instance, Lightsail will continue to return the original ciphertext value. When accessing the instance using RDP, you need to manually enter the Administrator password after changing it from the default.

#[serde(rename = "ciphertext")] @@ -3185,7 +3184,7 @@ pub struct PasswordData { pub struct PeerVpcRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PeerVpcResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operation")] @@ -3195,7 +3194,7 @@ pub struct PeerVpcResult { ///

Describes a pending database maintenance action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PendingMaintenanceAction { ///

The type of pending database maintenance action.

#[serde(rename = "action")] @@ -3213,7 +3212,7 @@ pub struct PendingMaintenanceAction { ///

Describes a pending database value modification.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PendingModifiedRelationalDatabaseValues { ///

A Boolean value indicating whether automated backup retention is enabled.

#[serde(rename = "backupRetentionEnabled")] @@ -3257,7 +3256,7 @@ pub struct PutInstancePublicPortsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutInstancePublicPortsResult { ///

Describes metadata about the operation you just executed.

#[serde(rename = "operation")] @@ -3273,7 +3272,7 @@ pub struct RebootInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootInstanceResult { ///

An array of key-value pairs containing information about the request operations.

#[serde(rename = "operations")] @@ -3289,7 +3288,7 @@ pub struct RebootRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootRelationalDatabaseResult { ///

An object describing the result of your reboot relational database request.

#[serde(rename = "operations")] @@ -3299,7 +3298,7 @@ pub struct RebootRelationalDatabaseResult { ///

Describes the AWS Region.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Region { ///

The Availability Zones. Follows the format us-east-2a (case-sensitive).

#[serde(rename = "availabilityZones")] @@ -3329,7 +3328,7 @@ pub struct Region { ///

Describes a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabase { ///

The Amazon Resource Name (ARN) of the database.

#[serde(rename = "arn")] @@ -3435,7 +3434,7 @@ pub struct RelationalDatabase { ///

Describes a database image, or blueprint. A blueprint describes the major engine version of a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseBlueprint { ///

The ID for the database blueprint.

#[serde(rename = "blueprintId")] @@ -3465,7 +3464,7 @@ pub struct RelationalDatabaseBlueprint { ///

Describes a database bundle. A bundle describes the performance specifications of the database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseBundle { ///

The ID for the database bundle.

#[serde(rename = "bundleId")] @@ -3507,7 +3506,7 @@ pub struct RelationalDatabaseBundle { ///

Describes an endpoint for a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseEndpoint { ///

Specifies the DNS address of the database.

#[serde(rename = "address")] @@ -3521,7 +3520,7 @@ pub struct RelationalDatabaseEndpoint { ///

Describes an event for a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseEvent { ///

The timestamp when the database event was created.

#[serde(rename = "createdAt")] @@ -3543,7 +3542,7 @@ pub struct RelationalDatabaseEvent { ///

Describes the hardware of a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseHardware { ///

The number of vCPUs for the database.

#[serde(rename = "cpuCount")] @@ -3598,7 +3597,7 @@ pub struct RelationalDatabaseParameter { ///

Describes a database snapshot.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RelationalDatabaseSnapshot { ///

The Amazon Resource Name (ARN) of the database snapshot.

#[serde(rename = "arn")] @@ -3670,7 +3669,7 @@ pub struct ReleaseStaticIpRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReleaseStaticIpResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operations")] @@ -3680,7 +3679,7 @@ pub struct ReleaseStaticIpResult { ///

Describes the resource location.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceLocation { ///

The Availability Zone. Follows the format us-east-2a (case-sensitive).

#[serde(rename = "availabilityZone")] @@ -3700,7 +3699,7 @@ pub struct StartInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartInstanceResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operations")] @@ -3716,7 +3715,7 @@ pub struct StartRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartRelationalDatabaseResult { ///

An object describing the result of your start relational database request.

#[serde(rename = "operations")] @@ -3726,7 +3725,7 @@ pub struct StartRelationalDatabaseResult { ///

Describes the static IP.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StaticIp { ///

The Amazon Resource Name (ARN) of the static IP (e.g., arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE).

#[serde(rename = "arn")] @@ -3778,7 +3777,7 @@ pub struct StopInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopInstanceResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operations")] @@ -3798,7 +3797,7 @@ pub struct StopRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopRelationalDatabaseResult { ///

An object describing the result of your stop relational database request.

#[serde(rename = "operations")] @@ -3830,7 +3829,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResult { ///

A list of objects describing the API operation.

#[serde(rename = "operations")] @@ -3842,7 +3841,7 @@ pub struct TagResourceResult { pub struct UnpeerVpcRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnpeerVpcResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operation")] @@ -3861,7 +3860,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResult { ///

A list of objects describing the API operation.

#[serde(rename = "operations")] @@ -3880,7 +3879,7 @@ pub struct UpdateDomainEntryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDomainEntryResult { ///

An array of key-value pairs containing information about the request operation.

#[serde(rename = "operations")] @@ -3902,7 +3901,7 @@ pub struct UpdateLoadBalancerAttributeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateLoadBalancerAttributeResult { ///

An object describing the API operations.

#[serde(rename = "operations")] @@ -3921,7 +3920,7 @@ pub struct UpdateRelationalDatabaseParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRelationalDatabaseParametersResult { ///

An object describing the result of your update relational database parameters request.

#[serde(rename = "operations")] @@ -3969,7 +3968,7 @@ pub struct UpdateRelationalDatabaseRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRelationalDatabaseResult { ///

An object describing the result of your update relational database request.

#[serde(rename = "operations")] @@ -12166,10 +12165,7 @@ impl LightsailClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> LightsailClient { - LightsailClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -12183,10 +12179,14 @@ impl LightsailClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - LightsailClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> LightsailClient { + LightsailClient { client, region } } } diff --git a/rusoto/services/logs/Cargo.toml b/rusoto/services/logs/Cargo.toml index a01b8437784..52000657829 100644 --- a/rusoto/services/logs/Cargo.toml +++ b/rusoto/services/logs/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_logs" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,7 +22,7 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false @@ -30,10 +30,12 @@ default-features = false chrono = "0.4" [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/logs/README.md b/rusoto/services/logs/README.md index b0742e1676e..a9b455de072 100644 --- a/rusoto/services/logs/README.md +++ b/rusoto/services/logs/README.md @@ -23,9 +23,16 @@ To use `rusoto_logs` in your application, add it as a dependency in your `Cargo. ```toml [dependencies] -rusoto_logs = "0.40.0" +rusoto_logs = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
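For illustration, here is a minimal sketch of what the new `serialize_structs` feature enables (struct and feature names as they appear in this diff; `serde_json` is an assumed extra dependency of the consuming crate):

```rust
// Cargo.toml (sketch):
//   rusoto_logs = { version = "0.41.0", features = ["serialize_structs"] }
//   serde_json = "1.0"

use rusoto_logs::DescribeLogStreamsResponse;

fn main() {
    // With `serialize_structs` enabled, response structs derive `Serialize`
    // outside of `cfg(test)`, so they can be dumped as JSON:
    let resp = DescribeLogStreamsResponse::default();
    println!("{}", serde_json::to_string(&resp).unwrap());
}
```

Gating the derive on `any(test, feature = "serialize_structs")` keeps the extra `Serialize` code out of default builds while still letting the crate's own tests round-trip responses.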
diff --git a/rusoto/services/logs/examples/put_log_events.rs b/rusoto/services/logs/examples/put_log_events.rs index 016e46dce2e..f862c672ac1 100644 --- a/rusoto/services/logs/examples/put_log_events.rs +++ b/rusoto/services/logs/examples/put_log_events.rs @@ -8,15 +8,14 @@ extern crate chrono; extern crate rusoto_core; extern crate rusoto_logs; -use chrono::{Utc}; +use chrono::Utc; -use std::default::Default; use rusoto_core::Region; -use rusoto_logs::{CloudWatchLogs, - CloudWatchLogsClient, - DescribeLogStreamsRequest, - InputLogEvent, - PutLogEventsRequest}; +use rusoto_logs::{ + CloudWatchLogs, CloudWatchLogsClient, DescribeLogStreamsRequest, InputLogEvent, + PutLogEventsRequest, +}; +use std::default::Default; fn main() { const LOG_GROUP_NAME: &'static str = "testing"; @@ -35,9 +34,9 @@ fn main() { let streams_resp = client.describe_log_streams(desc_streams_req).sync(); let log_streams = streams_resp.unwrap().log_streams.unwrap(); let stream = &log_streams - .iter() - .find(|s| s.log_stream_name == Some(LOG_STREAM_NAME.to_string())) - .unwrap(); + .iter() + .find(|s| s.log_stream_name == Some(LOG_STREAM_NAME.to_string())) + .unwrap(); let sequence_token = stream.upload_sequence_token.clone(); let put_log_events_request = PutLogEventsRequest { diff --git a/rusoto/services/logs/src/custom/mod.rs b/rusoto/services/logs/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/logs/src/custom/mod.rs +++ b/rusoto/services/logs/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/logs/src/generated.rs b/rusoto/services/logs/src/generated.rs index 00b1308fbfe..3a53b786e50 100644 --- a/rusoto/services/logs/src/generated.rs +++ b/rusoto/services/logs/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -70,7 +69,7 @@ pub struct CreateExportTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateExportTaskResponse { ///

The ID of the export task.

#[serde(rename = "taskId")] @@ -179,7 +178,7 @@ pub struct DescribeDestinationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDestinationsResponse { ///

The destinations.

#[serde(rename = "destinations")] @@ -211,7 +210,7 @@ pub struct DescribeExportTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeExportTasksResponse { ///

The export tasks.

#[serde(rename = "exportTasks")] @@ -239,7 +238,7 @@ pub struct DescribeLogGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLogGroupsResponse { ///

The log groups.

#[serde(rename = "logGroups")] @@ -278,7 +277,7 @@ pub struct DescribeLogStreamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLogStreamsResponse { ///

The log streams.

#[serde(rename = "logStreams")] @@ -318,7 +317,7 @@ pub struct DescribeMetricFiltersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMetricFiltersResponse { ///

The metric filters.

#[serde(rename = "metricFilters")] @@ -349,7 +348,7 @@ pub struct DescribeQueriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeQueriesResponse { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -372,7 +371,7 @@ pub struct DescribeResourcePoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeResourcePoliciesResponse { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -403,7 +402,7 @@ pub struct DescribeSubscriptionFiltersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSubscriptionFiltersResponse { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -416,7 +415,7 @@ pub struct DescribeSubscriptionFiltersResponse { ///

Represents a cross-account destination that receives subscription log events.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Destination { ///

An IAM policy document that governs which AWS accounts can create subscription filters against this destination.

#[serde(rename = "accessPolicy")] @@ -453,7 +452,7 @@ pub struct DisassociateKmsKeyRequest { ///

Represents an export task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportTask { ///

The name of Amazon S3 bucket to which the log data was exported.

#[serde(rename = "destination")] @@ -495,7 +494,7 @@ pub struct ExportTask { ///

Represents the status of an export task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportTaskExecutionInfo { ///

The completion time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

#[serde(rename = "completionTime")] @@ -509,7 +508,7 @@ pub struct ExportTaskExecutionInfo { ///

Represents the status of an export task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportTaskStatus { ///

The status code of the export task.

#[serde(rename = "code")] @@ -531,10 +530,6 @@ pub struct FilterLogEventsRequest { #[serde(rename = "filterPattern")] #[serde(skip_serializing_if = "Option::is_none")] pub filter_pattern: Option<String>, - ///

If the value is true, the operation makes a best effort to provide responses that contain events from multiple log streams within the log group, interleaved in a single response. If the value is false, all the matched log events in the first log stream are searched first, then those in the next log stream, and so on. The default is false.

IMPORTANT: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group.

- #[serde(rename = "interleaved")] - #[serde(skip_serializing_if = "Option::is_none")] - pub interleaved: Option<bool>, ///

The maximum number of events to return. The default is 10,000 events.

#[serde(rename = "limit")] #[serde(skip_serializing_if = "Option::is_none")] @@ -561,7 +556,7 @@ pub struct FilterLogEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FilterLogEventsResponse { ///

The matched events.

#[serde(rename = "events")] @@ -579,7 +574,7 @@ pub struct FilterLogEventsResponse { ///

Represents a matched event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FilteredLogEvent { ///

The ID of the event.

#[serde(rename = "eventId")] @@ -623,7 +618,7 @@ pub struct GetLogEventsRequest { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, - ///

If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false.

+ ///

If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false.

If you are using nextToken in this operation, you must specify true for startFromHead.

#[serde(rename = "startFromHead")] #[serde(skip_serializing_if = "Option::is_none")] pub start_from_head: Option<bool>, @@ -634,7 +629,7 @@ } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLogEventsResponse { ///

The events.

#[serde(rename = "events")] @@ -662,7 +657,7 @@ pub struct GetLogGroupFieldsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLogGroupFieldsResponse { ///

The array of fields found in the query. Each object in the array contains the name of the field, along with the percentage of time it appeared in the log events that were queried.

#[serde(rename = "logGroupFields")] @@ -678,7 +673,7 @@ pub struct GetLogRecordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLogRecordResponse { ///

The requested log event, as a JSON string.

#[serde(rename = "logRecord")] @@ -694,7 +689,7 @@ pub struct GetQueryResultsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQueryResultsResponse { ///

The log events that matched the query criteria during the most recent time it ran.

The results value is an array of arrays. Each log event is one object in the top-level array. Each of these log event objects is an array of field/value pairs.

#[serde(rename = "results")] @@ -729,7 +724,7 @@ pub struct ListTagsLogGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsLogGroupResponse { ///

The tags for the log group.

#[serde(rename = "tags")] @@ -739,7 +734,7 @@ pub struct ListTagsLogGroupResponse { ///

Represents a log group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogGroup { ///

The Amazon Resource Name (ARN) of the log group.

#[serde(rename = "arn")] @@ -772,7 +767,7 @@ pub struct LogGroup { ///

The fields contained in log events found by a GetLogGroupFields operation, along with the percentage of queried log events in which each field appears.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogGroupField { ///

The name of a log field.

#[serde(rename = "name")] @@ -786,7 +781,7 @@ pub struct LogGroupField { ///

Represents a log stream, which is a sequence of log events from a single emitter of logs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogStream { ///

The Amazon Resource Name (ARN) of the log stream.

#[serde(rename = "arn")] @@ -812,10 +807,6 @@ pub struct LogStream { #[serde(rename = "logStreamName")] #[serde(skip_serializing_if = "Option::is_none")] pub log_stream_name: Option, - ///

The number of bytes stored.

IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. This change applies only to log streams. The storedBytes parameter for log groups is not affected.

- #[serde(rename = "storedBytes")] - #[serde(skip_serializing_if = "Option::is_none")] - pub stored_bytes: Option<i64>, ///

The sequence token.

#[serde(rename = "uploadSequenceToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -824,7 +815,7 @@ pub struct LogStream { ///

Metric filters express how CloudWatch Logs would extract metric observations from ingested log events and transform them into metric data in a CloudWatch metric.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricFilter { ///

The creation time of the metric filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

#[serde(rename = "creationTime")] @@ -849,7 +840,7 @@ pub struct MetricFilter { ///

Represents a matched event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricFilterMatchRecord { ///

The raw event data.

#[serde(rename = "eventMessage")] @@ -885,7 +876,7 @@ pub struct MetricTransformation { ///

Represents a log event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutputLogEvent { ///

The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

#[serde(rename = "ingestionTime")] @@ -925,7 +916,7 @@ pub struct PutDestinationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutDestinationResponse { ///

The destination.

#[serde(rename = "destination")] @@ -951,7 +942,7 @@ pub struct PutLogEventsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutLogEventsResponse { ///

The next sequence token.

#[serde(rename = "nextSequenceToken")] @@ -992,7 +983,7 @@ pub struct PutResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutResourcePolicyResponse { ///

The new policy.

#[serde(rename = "resourcePolicy")] @@ -1053,7 +1044,7 @@ pub struct QueryCompileErrorLocation { ///

Information about one CloudWatch Logs Insights query that matches the request in a DescribeQueries operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryInfo { ///

The date and time that this query was created.

#[serde(rename = "createTime")] @@ -1079,7 +1070,7 @@ pub struct QueryInfo { ///

Contains the number of log events scanned by the query, the number of log events that matched the query criteria, and the total number of bytes in the log events that were scanned.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryStatistics { ///

The total number of bytes in the log events scanned during the query.

#[serde(rename = "bytesScanned")] @@ -1097,7 +1088,7 @@ pub struct QueryStatistics { ///

Represents the rejected events.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectedLogEventsInfo { ///

The expired log events.

#[serde(rename = "expiredLogEventEndIndex")] @@ -1115,7 +1106,7 @@ pub struct RejectedLogEventsInfo { ///

A policy enabling one or more entities to put logs to a log group in this account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourcePolicy { ///

Timestamp showing when this policy was last updated, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

#[serde(rename = "lastUpdatedTime")] @@ -1133,7 +1124,7 @@ pub struct ResourcePolicy { ///

Contains one field from one log event returned by a CloudWatch Logs Insights query, along with the value of that field.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultField { ///

The log event field.

#[serde(rename = "field")] @@ -1147,7 +1138,7 @@ pub struct ResultField { ///

Represents the search status of a log stream.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchedLogStream { ///

The name of the log stream.

#[serde(rename = "limit")] #[serde(skip_serializing_if = "Option::is_none")] pub limit: Option<i64>, - ///

The log group on which to perform the query.

+ ///

The log group on which to perform the query.

A StartQuery operation must include a logGroupNames or a logGroupName parameter, but not both.

#[serde(rename = "logGroupName")] - pub log_group_name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub log_group_name: Option<String>, + ///

The list of log groups to be queried. You can include up to 20 log groups.

A StartQuery operation must include a logGroupNames or a logGroupName parameter, but not both.
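A minimal sketch of building the new request shape, assuming hypothetical log group names, time bounds, and query text (the `sync()` call style matches the crate's examples):

```rust
use rusoto_core::Region;
use rusoto_logs::{CloudWatchLogs, CloudWatchLogsClient, StartQueryRequest};

fn main() {
    let client = CloudWatchLogsClient::new(Region::UsEast1);
    // Query several log groups at once via the new `log_group_names` field;
    // the now-optional `log_group_name` is left as `None`.
    let req = StartQueryRequest {
        log_group_names: Some(vec!["group-a".to_string(), "group-b".to_string()]),
        query_string: "fields @timestamp, @message | limit 20".to_string(),
        start_time: 1_560_000_000, // hypothetical epoch-second bounds
        end_time: 1_560_086_400,
        ..Default::default()
    };
    println!("{:?}", client.start_query(req).sync());
}
```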

+ #[serde(rename = "logGroupNames")] + #[serde(skip_serializing_if = "Option::is_none")] + pub log_group_names: Option<Vec<String>>, ///

The query string to use. For more information, see CloudWatch Logs Insights Query Syntax.

#[serde(rename = "queryString")] pub query_string: String, @@ -1180,7 +1176,7 @@ pub struct StartQueryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartQueryResponse { ///

The unique ID of the query.

#[serde(rename = "queryId")] @@ -1196,7 +1192,7 @@ pub struct StopQueryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopQueryResponse { ///

This is true if the query was stopped by the StopQuery operation.

#[serde(rename = "success")] @@ -1206,7 +1202,7 @@ pub struct StopQueryResponse { ///

Represents a subscription filter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscriptionFilter { ///

The creation time of the subscription filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

#[serde(rename = "creationTime")] @@ -1256,7 +1252,7 @@ pub struct TestMetricFilterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TestMetricFilterResponse { ///

The matched events.

#[serde(rename = "matches")] @@ -3235,7 +3231,7 @@ pub trait CloudWatchLogs { input: CancelExportTaskRequest, ) -> RusotoFuture<(), CancelExportTaskError>; - ///

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

+ ///

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.

fn create_export_task( &self, input: CreateExportTaskRequest, @@ -3373,7 +3369,7 @@ input: GetLogRecordRequest, ) -> RusotoFuture<GetLogRecordResponse, GetLogRecordError>; - ///

Returns the results from the specified query. If the query is in progress, partial results of that current execution are returned.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

+ ///

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

fn get_query_results( &self, input: GetQueryResultsRequest, @@ -3385,7 +3381,7 @@ input: ListTagsLogGroupRequest, ) -> RusotoFuture<ListTagsLogGroupResponse, ListTagsLogGroupError>; - ///

Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. Currently, the only supported physical resource is a Kinesis stream belonging to the same account as the destination.

Through an access policy, a destination controls what is written to its Kinesis stream. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

+ ///

Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose stream, or an AWS Lambda function.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

fn put_destination( &self, input: PutDestinationRequest, @@ -3463,10 +3459,7 @@ impl CloudWatchLogsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CloudWatchLogsClient { - CloudWatchLogsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3480,10 +3473,14 @@ impl CloudWatchLogsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - CloudWatchLogsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> CloudWatchLogsClient { + CloudWatchLogsClient { client, region } } } @@ -3540,7 +3537,7 @@ impl CloudWatchLogs for CloudWatchLogsClient { }) } - ///

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

+ ///

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.
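As a sketch of the asynchronous workflow described above (bucket, log group, and time-range values are hypothetical):

```rust
use rusoto_core::Region;
use rusoto_logs::{
    CloudWatchLogs, CloudWatchLogsClient, CreateExportTaskRequest, DescribeExportTasksRequest,
};

fn main() {
    let client = CloudWatchLogsClient::new(Region::UsEast1);
    // Kick off the export; the call only returns the task ID.
    let task_id = client
        .create_export_task(CreateExportTaskRequest {
            log_group_name: "testing".to_string(),
            destination: "my-export-bucket".to_string(), // AES-256 bucket, not SSE-KMS
            destination_prefix: Some("exported-logs".to_string()),
            from: 1_560_000_000_000, // hypothetical epoch-millisecond bounds
            to: 1_560_086_400_000,
            ..Default::default()
        })
        .sync()
        .unwrap()
        .task_id;
    // Poll for completion; only one RUNNING or PENDING task per account.
    let status = client
        .describe_export_tasks(DescribeExportTasksRequest {
            task_id,
            ..Default::default()
        })
        .sync();
    println!("{:?}", status);
}
```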

fn create_export_task( &self, input: CreateExportTaskRequest, @@ -4163,7 +4160,7 @@ impl CloudWatchLogs for CloudWatchLogsClient { }) } - ///

Returns the results from the specified query. If the query is in progress, partial results of that current execution are returned.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

+ ///

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

fn get_query_results( &self, input: GetQueryResultsRequest, @@ -4221,7 +4218,7 @@ impl CloudWatchLogs for CloudWatchLogsClient { }) } - ///

Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. Currently, the only supported physical resource is a Kinesis stream belonging to the same account as the destination.

Through an access policy, a destination controls what is written to its Kinesis stream. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

+ ///

Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose stream, or an AWS Lambda function.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

fn put_destination( &self, input: PutDestinationRequest, diff --git a/rusoto/services/logs/src/lib.rs b/rusoto/services/logs/src/lib.rs index 4e96d1f603d..7d09f6c1d72 100644 --- a/rusoto/services/logs/src/lib.rs +++ b/rusoto/services/logs/src/lib.rs @@ -30,15 +30,14 @@ //! extern crate rusoto_core; //! extern crate rusoto_logs; //! -//! use chrono::{Utc}; +//! use chrono::Utc; //! -//! use std::default::Default; //! use rusoto_core::Region; -//! use rusoto_logs::{CloudWatchLogs, -//! CloudWatchLogsClient, -//! DescribeLogStreamsRequest, -//! InputLogEvent, -//! PutLogEventsRequest}; +//! use rusoto_logs::{ +//! CloudWatchLogs, CloudWatchLogsClient, DescribeLogStreamsRequest, InputLogEvent, +//! PutLogEventsRequest, +//! }; +//! use std::default::Default; //! //! fn main() { //! const LOG_GROUP_NAME: &'static str = "testing"; @@ -57,9 +56,9 @@ //! let streams_resp = client.describe_log_streams(desc_streams_req).sync(); //! let log_streams = streams_resp.unwrap().log_streams.unwrap(); //! let stream = &log_streams -//! .iter() -//! .find(|s| s.log_stream_name == Some(LOG_STREAM_NAME.to_string())) -//! .unwrap(); +//! .iter() +//! .find(|s| s.log_stream_name == Some(LOG_STREAM_NAME.to_string())) +//! .unwrap(); //! let sequence_token = stream.upload_sequence_token.clone(); //! //! let put_log_events_request = PutLogEventsRequest { diff --git a/rusoto/services/machinelearning/Cargo.toml b/rusoto/services/machinelearning/Cargo.toml index 8c7f8788853..5c9bd167999 100644 --- a/rusoto/services/machinelearning/Cargo.toml +++ b/rusoto/services/machinelearning/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_machinelearning" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/machinelearning/README.md b/rusoto/services/machinelearning/README.md index 1e669b8eacb..0be2bdbcf82 100644 --- a/rusoto/services/machinelearning/README.md +++ b/rusoto/services/machinelearning/README.md @@ -23,9 +23,16 @@ To use `rusoto_machinelearning` in your application, add it as a dependency in y ```toml [dependencies] -rusoto_machinelearning = "0.40.0" +rusoto_machinelearning = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
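The generated clients in this diff (LightsailClient and CloudWatchLogsClient above, and presumably MachineLearningClient in the hunks that follow) gain a `new_with_client` constructor, so a `rusoto_core::Client` can be built once and handed in rather than constructed per service. A minimal sketch:

```rust
use rusoto_core::{Client, Region};
use rusoto_logs::CloudWatchLogsClient;

fn main() {
    // Build the shared dispatcher/credentials stack once...
    let core_client = Client::shared();
    // ...then inject it; per this diff, `new(region)` is now just
    // `new_with_client(Client::shared(), region)`.
    let logs = CloudWatchLogsClient::new_with_client(core_client, Region::UsEast1);
    let _ = logs;
}
```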
diff --git a/rusoto/services/machinelearning/src/custom/mod.rs b/rusoto/services/machinelearning/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/machinelearning/src/custom/mod.rs +++ b/rusoto/services/machinelearning/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/machinelearning/src/generated.rs b/rusoto/services/machinelearning/src/generated.rs index edc5257e6f9..ce761f14d24 100644 --- a/rusoto/services/machinelearning/src/generated.rs +++ b/rusoto/services/machinelearning/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -39,7 +38,7 @@ pub struct AddTagsInput { ///

Amazon ML returns the following elements.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsOutput { ///

The ID of the ML object that was tagged.

#[serde(rename = "ResourceId")] @@ -53,7 +52,7 @@ pub struct AddTagsOutput { ///

Represents the output of a GetBatchPrediction operation.

The content consists of the detailed metadata, the status, and the data file information of a Batch Prediction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchPrediction { ///

The ID of the DataSource that points to the group of observations to predict.

#[serde(rename = "BatchPredictionDataSourceId")] @@ -138,7 +137,7 @@ pub struct CreateBatchPredictionInput { ///

Represents the output of a CreateBatchPrediction operation, and is an acknowledgement that Amazon ML received the request.

The CreateBatchPrediction operation is asynchronous. You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBatchPredictionOutput { ///

A user-supplied ID that uniquely identifies the BatchPrediction. This value is identical to the value of the BatchPredictionId in the request.

#[serde(rename = "BatchPredictionId")] @@ -169,7 +168,7 @@ pub struct CreateDataSourceFromRDSInput { ///

Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement that Amazon ML received the request.

The CreateDataSourceFromRDS operation is asynchronous. You can poll for updates by using the GetBatchPrediction operation and checking the Status parameter. You can inspect the Message when Status shows up as FAILED. You can also check the progress of the copy operation by going to the DataPipeline console and looking up the pipeline using the pipelineId from the describe call.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDataSourceFromRDSOutput { ///

A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

#[serde(rename = "DataSourceId")] @@ -200,7 +199,7 @@ pub struct CreateDataSourceFromRedshiftInput { ///

Represents the output of a CreateDataSourceFromRedshift operation, and is an acknowledgement that Amazon ML received the request.

The CreateDataSourceFromRedshift operation is asynchronous. You can poll for updates by using the GetBatchPrediction operation and checking the Status parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDataSourceFromRedshiftOutput { ///

A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

#[serde(rename = "DataSourceId")] @@ -228,7 +227,7 @@ pub struct CreateDataSourceFromS3Input { ///

Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement that Amazon ML received the request.

The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates by using the GetBatchPrediction operation and checking the Status parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDataSourceFromS3Output { ///

A user-supplied ID that uniquely identifies the DataSource. This value should be identical to the value of the DataSourceID in the request.

#[serde(rename = "DataSourceId")] @@ -255,7 +254,7 @@ pub struct CreateEvaluationInput { ///

Represents the output of a CreateEvaluation operation, and is an acknowledgement that Amazon ML received the request.

The CreateEvaluation operation is asynchronous. You can poll for status updates by using the GetEvaluation operation and checking the Status parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEvaluationOutput { ///

The user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

#[serde(rename = "EvaluationId")] @@ -294,7 +293,7 @@ pub struct CreateMLModelInput { ///

Represents the output of a CreateMLModel operation, and is an acknowledgement that Amazon ML received the request.

The CreateMLModel operation is asynchronous. You can poll for status updates by using the GetMLModel operation and checking the Status parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateMLModelOutput { ///

A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

#[serde(rename = "MLModelId")] @@ -311,7 +310,7 @@ pub struct CreateRealtimeEndpointInput { ///

Represents the output of a CreateRealtimeEndpoint operation.

The result contains the MLModelId and the endpoint information for the MLModel.

The endpoint information includes the URI of the MLModel; that is, the location to send online prediction requests for the specified MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRealtimeEndpointOutput { ///

A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

#[serde(rename = "MLModelId")] @@ -325,7 +324,7 @@ pub struct CreateRealtimeEndpointOutput { ///

Represents the output of the GetDataSource operation.

The content consists of the detailed metadata and data file information and the current status of the DataSource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DataSource { ///

The parameter is true if statistics need to be generated from the observation data.

#[serde(rename = "ComputeStatistics")] @@ -404,7 +403,7 @@ pub struct DeleteBatchPredictionInput { ///

Represents the output of a DeleteBatchPrediction operation.

You can use the GetBatchPrediction operation and check the value of the Status parameter to see whether a BatchPrediction is marked as DELETED.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBatchPredictionOutput { ///

A user-supplied ID that uniquely identifies the BatchPrediction. This value should be identical to the value of the BatchPredictionID in the request.

#[serde(rename = "BatchPredictionId")] @@ -421,7 +420,7 @@ pub struct DeleteDataSourceInput { ///

Represents the output of a DeleteDataSource operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDataSourceOutput { ///

A user-supplied ID that uniquely identifies the DataSource. This value should be identical to the value of the DataSourceID in the request.

#[serde(rename = "DataSourceId")] @@ -438,7 +437,7 @@ pub struct DeleteEvaluationInput { ///

Represents the output of a DeleteEvaluation operation. The output indicates that Amazon Machine Learning (Amazon ML) received the request.

You can use the GetEvaluation operation and check the value of the Status parameter to see whether an Evaluation is marked as DELETED.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteEvaluationOutput { ///

A user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

#[serde(rename = "EvaluationId")] @@ -455,7 +454,7 @@ pub struct DeleteMLModelInput { ///

Represents the output of a DeleteMLModel operation.

You can use the GetMLModel operation and check the value of the Status parameter to see whether an MLModel is marked as DELETED.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteMLModelOutput { ///

A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

#[serde(rename = "MLModelId")] @@ -472,7 +471,7 @@ pub struct DeleteRealtimeEndpointInput { ///

Represents the output of a DeleteRealtimeEndpoint operation.

The result contains the MLModelId and the endpoint information for the MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRealtimeEndpointOutput { ///

A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

#[serde(rename = "MLModelId")] @@ -499,7 +498,7 @@ pub struct DeleteTagsInput { ///

Amazon ML returns the following elements.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTagsOutput { ///

The ID of the ML object from which tags were deleted.

#[serde(rename = "ResourceId")] @@ -561,7 +560,7 @@ pub struct DescribeBatchPredictionsInput { ///

Represents the output of a DescribeBatchPredictions operation. The content is essentially a list of BatchPredictions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBatchPredictionsOutput { ///

The ID of the next page in the paginated results that indicates at least one more page follows.

#[serde(rename = "NextToken")] @@ -623,7 +622,7 @@ pub struct DescribeDataSourcesInput { ///

Represents the query results from a DescribeDataSources operation. The content is essentially a list of DataSource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDataSourcesOutput { ///

An ID of the next page in the paginated results that indicates at least one more page follows.

#[serde(rename = "NextToken")] @@ -685,7 +684,7 @@ pub struct DescribeEvaluationsInput { ///

Represents the query results from a DescribeEvaluations operation. The content is essentially a list of Evaluation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEvaluationsOutput { ///

The ID of the next page in the paginated results that indicates at least one more page follows.

#[serde(rename = "NextToken")] @@ -747,7 +746,7 @@ pub struct DescribeMLModelsInput { ///

Represents the output of a DescribeMLModels operation. The content is essentially a list of MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMLModelsOutput { ///

The ID of the next page in the paginated results that indicates at least one more page follows.

#[serde(rename = "NextToken")] @@ -771,7 +770,7 @@ pub struct DescribeTagsInput { ///

Amazon ML returns the following elements.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagsOutput { ///

The ID of the tagged ML object.

#[serde(rename = "ResourceId")] @@ -789,7 +788,7 @@ pub struct DescribeTagsOutput { ///

Represents the output of a GetEvaluation operation.

The content consists of the detailed metadata and data file information and the current status of the Evaluation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Evaluation { #[serde(rename = "ComputeTime")] #[serde(skip_serializing_if = "Option::is_none")] @@ -855,7 +854,7 @@ pub struct GetBatchPredictionInput { ///

Represents the output of a GetBatchPrediction operation and describes a BatchPrediction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetBatchPredictionOutput { ///

The ID of the DataSource that was used to create the BatchPrediction.

#[serde(rename = "BatchPredictionDataSourceId")] @@ -940,7 +939,7 @@ pub struct GetDataSourceInput { ///

Represents the output of a GetDataSource operation and describes a DataSource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDataSourceOutput { ///

The parameter is true if statistics need to be generated from the observation data.

#[serde(rename = "ComputeStatistics")] @@ -1030,7 +1029,7 @@ pub struct GetEvaluationInput { ///

Represents the output of a GetEvaluation operation and describes an Evaluation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEvaluationOutput { ///

The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the Evaluation, normalized and scaled on computation resources. ComputeTime is only available if the Evaluation is in the COMPLETED state.

#[serde(rename = "ComputeTime")] @@ -1107,7 +1106,7 @@ pub struct GetMLModelInput { ///

Represents the output of a GetMLModel operation, and provides detailed information about an MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMLModelOutput { ///

The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the MLModel, normalized and scaled on computation resources. ComputeTime is only available if the MLModel is in the COMPLETED state.

#[serde(rename = "ComputeTime")] @@ -1196,7 +1195,7 @@ pub struct GetMLModelOutput { ///

Represents the output of a GetMLModel operation.

The content consists of the detailed metadata and the current status of the MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MLModel { ///

The algorithm used to train the MLModel. The following algorithm is supported:

  • SGD -- Stochastic gradient descent. The goal of SGD is to minimize the value of the loss function.

#[serde(rename = "Algorithm")] @@ -1273,7 +1272,7 @@ pub struct MLModel { ///

Measurements of how well the MLModel performed on known observations. One of the following metrics is returned, based on the type of the MLModel:

  • BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

  • RegressionRMSE: The regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

  • MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to measure performance.

For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PerformanceMetrics { #[serde(rename = "Properties")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1292,7 +1291,7 @@ pub struct PredictInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PredictOutput { #[serde(rename = "Prediction")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1301,7 +1300,7 @@ pub struct PredictOutput { ///

The output from a Predict operation:

  • Details - Contains the following attributes: DetailsAttributes.PREDICTIVEMODELTYPE - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD

  • PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.

  • PredictedScores - Contains the raw classification score corresponding to each label.

  • PredictedValue - Present for a REGRESSION MLModel request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Prediction { #[serde(rename = "details")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1381,7 +1380,7 @@ pub struct RDSDatabaseCredentials { ///

The datasource details that are specific to Amazon RDS.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RDSMetadata { ///

The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.

#[serde(rename = "DataPipelineId")] @@ -1410,7 +1409,7 @@ pub struct RDSMetadata { ///

Describes the real-time endpoint information for an MLModel.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RealtimeEndpointInfo { ///

The time that the request to create the real-time endpoint for the MLModel was received. The time is expressed in epoch time.

#[serde(rename = "CreatedAt")] @@ -1479,7 +1478,7 @@ pub struct RedshiftDatabaseCredentials { ///

Describes the DataSource details specific to Amazon Redshift.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RedshiftMetadata { #[serde(rename = "DatabaseUserName")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1538,7 +1537,7 @@ pub struct UpdateBatchPredictionInput { ///

Represents the output of an UpdateBatchPrediction operation.

You can see the updated content by using the GetBatchPrediction operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBatchPredictionOutput { ///

The ID assigned to the BatchPrediction during creation. This value should be identical to the value of the BatchPredictionId in the request.

#[serde(rename = "BatchPredictionId")] @@ -1558,7 +1557,7 @@ pub struct UpdateDataSourceInput { ///

Represents the output of an UpdateDataSource operation.

You can see the updated content by using the GetDataSource operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDataSourceOutput { ///

The ID assigned to the DataSource during creation. This value should be identical to the value of the DataSourceId in the request.

#[serde(rename = "DataSourceId")] @@ -1578,7 +1577,7 @@ pub struct UpdateEvaluationInput { ///

Represents the output of an UpdateEvaluation operation.

You can see the updated content by using the GetEvaluation operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEvaluationOutput { ///

The ID assigned to the Evaluation during creation. This value should be identical to the value of the EvaluationId in the request.

#[serde(rename = "EvaluationId")] @@ -1603,7 +1602,7 @@ pub struct UpdateMLModelInput { ///

Represents the output of an UpdateMLModel operation.

You can see the updated content by using the GetMLModel operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateMLModelOutput { ///

The ID assigned to the MLModel during creation. This value should be identical to the value of the MLModelId in the request.

     #[serde(rename = "MLModelId")]
@@ -3099,10 +3098,7 @@ impl MachineLearningClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> MachineLearningClient {
-        MachineLearningClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -3116,10 +3112,14 @@ impl MachineLearningClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        MachineLearningClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> MachineLearningClient {
+        MachineLearningClient { client, region }
     }
 }
diff --git a/rusoto/services/macie/Cargo.toml b/rusoto/services/macie/Cargo.toml
index e0514c316fc..51ef6e586f4 100644
--- a/rusoto/services/macie/Cargo.toml
+++ b/rusoto/services/macie/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_macie"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/macie/README.md b/rusoto/services/macie/README.md
index ab4c462ae85..3415fc88de2 100644
--- a/rusoto/services/macie/README.md
+++ b/rusoto/services/macie/README.md
@@ -23,9 +23,16 @@ To use `rusoto_macie` in your application, add it as a dependency in your `Cargo

 ```toml
 [dependencies]
-rusoto_macie = "0.40.0"
+rusoto_macie = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/macie/src/custom/mod.rs b/rusoto/services/macie/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/macie/src/custom/mod.rs
+++ b/rusoto/services/macie/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/macie/src/generated.rs b/rusoto/services/macie/src/generated.rs
index f818144ca19..9171d30656c 100644
--- a/rusoto/services/macie/src/generated.rs
+++ b/rusoto/services/macie/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -43,7 +42,7 @@ pub struct AssociateS3ResourcesRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AssociateS3ResourcesResult {
     /// S3 resources that couldn't be associated with Amazon Macie. An error code and an error message are provided for each failed item.
#[serde(rename = "failedS3Resources")] @@ -94,7 +93,7 @@ pub struct DisassociateS3ResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateS3ResourcesResult { ///

S3 resources that couldn't be removed from being monitored and classified by Amazon Macie. An error code and an error message are provided for each failed item.

#[serde(rename = "failedS3Resources")] @@ -104,7 +103,7 @@ pub struct DisassociateS3ResourcesResult { ///

Includes details about the failed S3 resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedS3Resource { ///

The status code of a failed item.

#[serde(rename = "errorCode")] @@ -133,7 +132,7 @@ pub struct ListMemberAccountsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListMemberAccountsResult { ///

A list of the Amazon Macie member accounts returned by the action. The current master account is also included in this list.

#[serde(rename = "memberAccounts")] @@ -162,7 +161,7 @@ pub struct ListS3ResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListS3ResourcesResult { ///

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

#[serde(rename = "nextToken")] @@ -176,7 +175,7 @@ pub struct ListS3ResourcesResult { ///

Contains information about the Amazon Macie member account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MemberAccount { ///

The AWS account ID of the Amazon Macie member account.

#[serde(rename = "accountId")] @@ -238,7 +237,7 @@ pub struct UpdateS3ResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateS3ResourcesResult { ///

The S3 resources whose classification types can't be updated. An error code and an error message are provided for each failed item.

     #[serde(rename = "failedS3Resources")]
@@ -619,10 +618,7 @@ impl MacieClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> MacieClient {
-        MacieClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -636,10 +632,14 @@ impl MacieClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        MacieClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> MacieClient {
+        MacieClient { client, region }
     }
 }
diff --git a/rusoto/services/marketplace-entitlement/Cargo.toml b/rusoto/services/marketplace-entitlement/Cargo.toml
index d3ef850537b..5da876e0574 100644
--- a/rusoto/services/marketplace-entitlement/Cargo.toml
+++ b/rusoto/services/marketplace-entitlement/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_marketplace_entitlement"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/marketplace-entitlement/README.md b/rusoto/services/marketplace-entitlement/README.md
index fa63476e001..a97e2044c5b 100644
--- a/rusoto/services/marketplace-entitlement/README.md
+++ b/rusoto/services/marketplace-entitlement/README.md
@@ -23,9 +23,16 @@ To use `rusoto_marketplace_entitlement` in your application, add it as a depende

 ```toml
 [dependencies]
-rusoto_marketplace_entitlement = "0.40.0"
+rusoto_marketplace_entitlement = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/marketplace-entitlement/src/custom/mod.rs b/rusoto/services/marketplace-entitlement/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/marketplace-entitlement/src/custom/mod.rs
+++ b/rusoto/services/marketplace-entitlement/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/marketplace-entitlement/src/generated.rs b/rusoto/services/marketplace-entitlement/src/generated.rs
index 19f5cec2b4a..76b4897db9a 100644
--- a/rusoto/services/marketplace-entitlement/src/generated.rs
+++ b/rusoto/services/marketplace-entitlement/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;

 /// An entitlement represents capacity in a product owned by the customer. For example, a customer might own some number of users or seats in an SaaS application or some amount of data capacity in a multi-tenant database.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Entitlement { ///

The customer identifier is a handle to each unique customer in an application. Customer identifiers are obtained through the ResolveCustomer operation in AWS Marketplace Metering Service.

#[serde(rename = "CustomerIdentifier")] @@ -52,7 +51,7 @@ pub struct Entitlement { ///

The EntitlementValue represents the amount of capacity that the customer is entitled to for the product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EntitlementValue { ///

The BooleanValue field will be populated with a boolean value when the entitlement is a boolean type. Otherwise, the field will not be set.

#[serde(rename = "BooleanValue")] @@ -94,7 +93,7 @@ pub struct GetEntitlementsRequest { ///

The GetEntitlementsRequest contains results from the GetEntitlements operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEntitlementsResult { ///

The set of entitlements found through the GetEntitlements operation. If the result contains an empty set of entitlements, NextToken might still be present and should be used.

     #[serde(rename = "Entitlements")]
@@ -173,10 +172,7 @@ impl MarketplaceEntitlementClient {
     ///
     /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> MarketplaceEntitlementClient {
-        MarketplaceEntitlementClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -190,10 +186,14 @@ impl MarketplaceEntitlementClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        MarketplaceEntitlementClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> MarketplaceEntitlementClient {
+        MarketplaceEntitlementClient { client, region }
     }
 }
diff --git a/rusoto/services/marketplacecommerceanalytics/Cargo.toml b/rusoto/services/marketplacecommerceanalytics/Cargo.toml
index c48693ffad6..414952a7d2e 100644
--- a/rusoto/services/marketplacecommerceanalytics/Cargo.toml
+++ b/rusoto/services/marketplacecommerceanalytics/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_marketplacecommerceanalytics"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/marketplacecommerceanalytics/README.md b/rusoto/services/marketplacecommerceanalytics/README.md
index b21695bd84a..da4a80ce23f 100644
--- a/rusoto/services/marketplacecommerceanalytics/README.md
+++ b/rusoto/services/marketplacecommerceanalytics/README.md
@@ -23,9 +23,16 @@ To use `rusoto_marketplacecommerceanalytics` in your application, add it as a de

 ```toml
 [dependencies]
-rusoto_marketplacecommerceanalytics = "0.40.0"
+rusoto_marketplacecommerceanalytics = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/marketplacecommerceanalytics/src/custom/mod.rs b/rusoto/services/marketplacecommerceanalytics/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/marketplacecommerceanalytics/src/custom/mod.rs
+++ b/rusoto/services/marketplacecommerceanalytics/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/marketplacecommerceanalytics/src/generated.rs b/rusoto/services/marketplacecommerceanalytics/src/generated.rs
index ddea5ee25e6..8a96d0662bc 100644
--- a/rusoto/services/marketplacecommerceanalytics/src/generated.rs
+++ b/rusoto/services/marketplacecommerceanalytics/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -34,7 +33,7 @@ pub struct GenerateDataSetRequest {
     /// The date a data set was published. For daily data sets, provide a date with day-level granularity for the desired day. For weekly data sets, provide a date with day-level granularity within the desired week (the day value will be ignored). For monthly data sets, provide a date with month-level granularity for the desired month (the day value will be ignored).
     #[serde(rename = "dataSetPublicationDate")]
     pub data_set_publication_date: f64,
-    /// The desired data set type.
  • customer_subscriber_hourly_monthly_subscriptions

    From 2014-07-21 to present: Available daily by 5:00 PM Pacific Time.

  • customer_subscriber_annual_subscriptions

    From 2014-07-21 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_usage_by_instance_type

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_fees

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_free_trial_conversions

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_new_instances

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_new_product_subscribers

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • daily_business_canceled_product_subscribers

    From 2015-01-26 to present: Available daily by 5:00 PM Pacific Time.

  • monthly_revenue_billing_and_revenue_data

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 5:00pm Pacific Time. Data includes metered transactions (e.g. hourly) from two months prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 5:00pm Pacific Time. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 5:00pm Pacific Time. Data includes up-front software charges (e.g. annual) from one month prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 5:00pm Pacific Time. Data includes up-front software charges (e.g. annual) from one month prior.

  • disbursed_amount_by_product

    From 2015-01-26 to present: Available every 30 days by 5:00 PM Pacific Time.

  • disbursed_amount_by_product_with_uncollected_funds

    From 2012-04-19 to 2015-01-25: Available every 30 days by 5:00 PM Pacific Time.

    From 2015-01-26 to present: This data set was split into three data sets: disbursed_amount_by_product, disbursed_amount_by_age_of_uncollected_funds, and disbursed_amount_by_age_of_disbursed_funds.

  • disbursed_amount_by_instance_hours

    From 2012-09-04 to present: Available every 30 days by 5:00 PM Pacific Time.

  • disbursed_amount_by_customer_geo

    From 2012-04-19 to present: Available every 30 days by 5:00 PM Pacific Time.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2015-01-26 to present: Available every 30 days by 5:00 PM Pacific Time.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2015-01-26 to present: Available every 30 days by 5:00 PM Pacific Time.

  • customer_profile_by_industry

    From 2015-10-01 to 2017-06-29: Available daily by 5:00 PM Pacific Time.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_revenue

    From 2015-10-01 to 2017-06-29: Available daily by 5:00 PM Pacific Time.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_geography

    From 2015-10-01 to 2017-06-29: Available daily by 5:00 PM Pacific Time.

    From 2017-06-30 to present: This data set is no longer available.

  • sales_compensation_billed_revenue

    From 2016-12 to 2017-06: Available monthly on the 4th day of the month by 5:00pm Pacific Time. Data includes metered transactions (e.g. hourly) from two months prior, and up-front software charges (e.g. annual) from one month prior.

    From 2017-06 to present: Available monthly on the 15th day of the month by 5:00pm Pacific Time. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-02-15 to present: Available monthly on the 15th day of the month by 5:00 PM Pacific Time.

+    /// The desired data set type.
  • customer_subscriber_hourly_monthly_subscriptions

    From 2014-07-21 to present: Available daily by 24:00 UTC.

  • customer_subscriber_annual_subscriptions

    From 2014-07-21 to present: Available daily by 24:00 UTC.

  • daily_business_usage_by_instance_type

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_fees

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_free_trial_conversions

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_new_instances

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_new_product_subscribers

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_canceled_product_subscribers

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • monthly_revenue_billing_and_revenue_data

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from two months prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

  • monthly_revenue_field_demonstration_usage

    From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • monthly_revenue_flexible_payment_schedule

    From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_product_with_uncollected_funds

    From 2012-04-19 to 2015-01-25: Available every 30 days by 24:00 UTC.

    From 2015-01-26 to present: This data set was split into three data sets: disbursed_amount_by_product, disbursed_amount_by_age_of_uncollected_funds, and disbursed_amount_by_age_of_disbursed_funds.

  • disbursed_amount_by_instance_hours

    From 2012-09-04 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_customer_geo

    From 2012-04-19 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • customer_profile_by_industry

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_revenue

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_geography

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • sales_compensation_billed_revenue

    From 2016-12 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from two months prior, and up-front software charges (e.g. annual) from one month prior.

    From 2017-06 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-02-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

     #[serde(rename = "dataSetType")]
     pub data_set_type: String,
     /// The name (friendly name, not ARN) of the destination S3 bucket.
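The list above pairs each `dataSetType` with its publication cadence. As a hedged illustration of how the request struct from this file is used: `GenerateDataSetRequest` carries more required fields (destination bucket, role ARN, SNS topic) than this diff shows, so `Default::default()` stands in for them here, and every name not visible in the diff is an assumption:

```rust
// Hedged sketch, not part of this diff; field and trait names follow rusoto
// conventions and are assumptions where the diff does not show them.
use rusoto_core::Region;
use rusoto_marketplacecommerceanalytics::{
    GenerateDataSetRequest, MarketplaceCommerceAnalytics, MarketplaceCommerceAnalyticsClient,
};

fn main() {
    let client = MarketplaceCommerceAnalyticsClient::new(Region::UsEast1);
    let request = GenerateDataSetRequest {
        data_set_type: "customer_subscriber_hourly_monthly_subscriptions".to_string(),
        // Epoch seconds as f64, matching `data_set_publication_date: f64` above.
        data_set_publication_date: 1_546_300_800.0,
        // Bucket, role ARN, SNS topic, etc. would be filled in for a real call.
        ..Default::default()
    };
    match client.generate_data_set(request).sync() {
        Ok(result) => println!("request id: {:?}", result.data_set_request_id),
        Err(e) => eprintln!("GenerateDataSet failed: {}", e),
    }
}
```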
@@ -54,7 +53,7 @@ pub struct GenerateDataSetRequest {
 /// Container for the result of the GenerateDataSet operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GenerateDataSetResult {
     /// A unique identifier representing a specific request to the GenerateDataSet operation. This identifier can be used to correlate a request with notifications from the SNS topic.
     #[serde(rename = "dataSetRequestId")]
@@ -92,7 +91,7 @@ pub struct StartSupportDataExportRequest {
 /// Container for the result of the StartSupportDataExport operation.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartSupportDataExportResult {
     /// A unique identifier representing a specific request to the StartSupportDataExport operation. This identifier can be used to correlate a request with notifications from the SNS topic.
     #[serde(rename = "dataSetRequestId")]
@@ -196,10 +195,7 @@ impl MarketplaceCommerceAnalyticsClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> MarketplaceCommerceAnalyticsClient {
-        MarketplaceCommerceAnalyticsClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -213,10 +209,17 @@ impl MarketplaceCommerceAnalyticsClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        MarketplaceCommerceAnalyticsClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(
+        client: Client,
+        region: region::Region,
+    ) -> MarketplaceCommerceAnalyticsClient {
+        MarketplaceCommerceAnalyticsClient { client, region }
     }
 }
diff --git a/rusoto/services/mediaconvert/Cargo.toml b/rusoto/services/mediaconvert/Cargo.toml
index 9df12b199ef..d08d1ea44ea 100644
--- a/rusoto/services/mediaconvert/Cargo.toml
+++ b/rusoto/services/mediaconvert/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_mediaconvert"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/mediaconvert/README.md b/rusoto/services/mediaconvert/README.md
index f908a8315d1..0b711df655a 100644
--- a/rusoto/services/mediaconvert/README.md
+++ b/rusoto/services/mediaconvert/README.md
@@ -23,9 +23,16 @@ To use `rusoto_mediaconvert` in your application, add it as a dependency in your

 ```toml
 [dependencies]
-rusoto_mediaconvert = "0.40.0"
+rusoto_mediaconvert = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/mediaconvert/src/custom/mod.rs b/rusoto/services/mediaconvert/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/mediaconvert/src/custom/mod.rs
+++ b/rusoto/services/mediaconvert/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/mediaconvert/src/generated.rs b/rusoto/services/mediaconvert/src/generated.rs
index 12e15e34b2d..d6d4250ad0b 100644
--- a/rusoto/services/mediaconvert/src/generated.rs
+++ b/rusoto/services/mediaconvert/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -32,7 +31,7 @@ pub struct AacSettings {
     #[serde(rename = "AudioDescriptionBroadcasterMix")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub audio_description_broadcaster_mix: Option<String>,
-    /// Average bitrate in bits/second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile.
+    /// Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile.
     #[serde(rename = "Bitrate")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub bitrate: Option<i64>,
@@ -69,11 +68,11 @@ pub struct AacSettings {
 /// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Ac3Settings {
-    /// Average bitrate in bits/second. Valid bitrates depend on the coding mode.
+    /// Specify the average bitrate in bits per second. Valid bitrates depend on the coding mode.
     #[serde(rename = "Bitrate")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub bitrate: Option<i64>,
-    /// Specifies the "Bitstream Mode" (bsmod) for the emitted AC-3 stream. See ATSC A/52-2012 for background on these values.
+    /// Specify the bitstream mode for the AC-3 stream that the encoder emits. For more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex E).
     #[serde(rename = "BitstreamMode")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub bitstream_mode: Option<String>,
@@ -97,7 +96,7 @@ pub struct Ac3Settings {
     #[serde(rename = "MetadataControl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub metadata_control: Option<String>,
-    /// Sample rate in hz. Sample rate is always 48000.
+    /// This value is always 48000. It represents the sample rate in Hz.
     #[serde(rename = "SampleRate")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub sample_rate: Option<i64>,
@@ -118,7 +117,7 @@ pub struct AiffSettings {
     #[serde(rename = "BitDepth")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub bit_depth: Option<i64>,
-    /// Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2.
+    /// Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
     #[serde(rename = "Channels")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub channels: Option<i64>,
@@ -131,10 +130,18 @@ pub struct AiffSettings {
 /// Settings for ancillary captions source.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AncillarySourceSettings {
+    /// Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.
+    #[serde(rename = "Convert608To708")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub convert_608_to_708: Option<String>,
     /// Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for passthrough.
     #[serde(rename = "SourceAncillaryChannelNumber")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub source_ancillary_channel_number: Option<i64>,
+    /// By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting.
+    #[serde(rename = "TerminateCaptions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub terminate_captions: Option<String>,
 }

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -145,10 +152,10 @@ pub struct AssociateCertificateRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AssociateCertificateResponse {}

-/// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings
+/// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AudioCodecSettings {
     /// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode.
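The codec-enum/settings-object pairing described above means a caller sets `Codec` and populates exactly the matching settings struct. A hedged sketch for the AAC CBR case; the enum string values and any field not shown in this diff are assumptions:

```rust
// Hedged sketch, not part of this diff: AAC in CBR mode at 128 kb/s, one of
// the valid bitrate values listed earlier in this file.
use rusoto_mediaconvert::{AacSettings, AudioCodecSettings};

fn aac_cbr_128k() -> AudioCodecSettings {
    AudioCodecSettings {
        codec: Some("AAC".to_string()), // selects the AacSettings branch
        aac_settings: Some(AacSettings {
            rate_control_mode: Some("CBR".to_string()), // CBR => use `bitrate`
            bitrate: Some(128_000),
            sample_rate: Some(48_000),
            ..Default::default()
        }),
        ..Default::default()
    }
}
```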
@@ -167,6 +174,10 @@ pub struct AudioCodecSettings {
     #[serde(rename = "Codec")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub codec: Option<String>,
+    /// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS.
+    #[serde(rename = "Eac3AtmosSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub eac_3_atmos_settings: Option<Eac3AtmosSettings>,
     /// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3.
     #[serde(rename = "Eac3Settings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub eac_3_settings: Option<Eac3Settings>,
@@ -184,7 +195,7 @@ pub struct AudioCodecSettings {
 /// Description of audio output
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AudioDescription {
-    /// Advanced audio normalization settings.
+    /// Advanced audio normalization settings. Ignore these settings unless you need to comply with a loudness standard.
     #[serde(rename = "AudioNormalizationSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub audio_normalization_settings: Option<AudioNormalizationSettings>,
@@ -200,7 +211,7 @@ pub struct AudioDescription {
     #[serde(rename = "AudioTypeControl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub audio_type_control: Option<String>,
-    /// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings
+    /// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings
     #[serde(rename = "CodecSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub codec_settings: Option<AudioCodecSettings>,
@@ -220,16 +231,16 @@ pub struct AudioDescription {
     #[serde(rename = "RemixSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub remix_settings: Option<RemixSettings>,
-    /// Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by the player (eg. English, or Director Commentary). Alphanumeric characters, spaces, and underscore are legal.
+    /// Specify a label for this output audio stream. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
     #[serde(rename = "StreamName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub stream_name: Option<String>,
 }

-/// Advanced audio normalization settings.
+/// Advanced audio normalization settings. Ignore these settings unless you need to comply with a loudness standard.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AudioNormalizationSettings {
-    /// Audio normalization algorithm to use. 1770-1 conforms to the CALM Act specification, 1770-2 conforms to the EBU R-128 specification.
+    /// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1: Ungated loudness. A measurement of ungated average loudness for an entire piece of content, suitable for measurement of short-form content under ATSC recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2: Gated loudness. A measurement of gated average loudness compliant with the requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3: Modified peak. The same loudness measurement algorithm as 1770-2, with an updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows for more audio channels than the other algorithms, including configurations such as 7.1.
     #[serde(rename = "Algorithm")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub algorithm: Option<String>,
@@ -249,7 +260,7 @@ pub struct AudioNormalizationSettings {
     #[serde(rename = "PeakCalculation")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub peak_calculation: Option<String>,
-    /// Target LKFS(loudness) to adjust volume to. If no value is entered, a default value will be used according to the chosen algorithm. The CALM Act (1770-1) recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends a target of -23 LKFS.
+    /// When you use Audio normalization (AudioNormalizationSettings), optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS.
     #[serde(rename = "TargetLkfs")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub target_lkfs: Option<f64>,
@@ -403,7 +414,7 @@ pub struct CancelJobRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CancelJobResponse {}

 /// Description of Caption output
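The normalization hunks above distinguish four BS.1770 algorithm variants and note that the encoder picks -24 or -23 LKFS when no target is given. A hedged sketch of an EBU R128-style configuration; the enum spelling and the decision to pin the target are assumptions for illustration:

```rust
// Hedged sketch, not part of this diff. `ITU_BS_1770_2` is the gated-loudness
// algorithm described above (EBU R128, up to 5.1 channels).
use rusoto_mediaconvert::AudioNormalizationSettings;

fn ebu_r128_normalization() -> AudioNormalizationSettings {
    AudioNormalizationSettings {
        algorithm: Some("ITU_BS_1770_2".to_string()),
        // EBU R128 target; omit to let the encoder choose -23 LKFS for this algorithm.
        target_lkfs: Some(-23.0),
        ..Default::default()
    }
}
```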
@@ -425,7 +436,7 @@ pub struct CaptionDescription {
     #[serde(rename = "LanguageCode")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub language_code: Option<String>,
-    /// Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal.
+    /// Specify a label for this set of output captions. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
     #[serde(rename = "LanguageDescription")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub language_description: Option<String>,
@@ -446,7 +457,7 @@ pub struct CaptionDescriptionPreset {
     #[serde(rename = "LanguageCode")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub language_code: Option<String>,
-    /// Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal.
+    /// Specify a label for this set of output captions. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
     #[serde(rename = "LanguageDescription")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub language_description: Option<String>,
@@ -459,7 +470,7 @@ pub struct CaptionDestinationSettings {
     #[serde(rename = "BurninDestinationSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub burnin_destination_settings: Option<BurninDestinationSettings>,
-    /// Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20PLUSEMBEDDED) to create an output that complies with the SCTE-43 spec. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDEDPLUSSCTE20).
+    /// Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20PLUSEMBEDDED) to create an output that complies with the SCTE-43 spec. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDEDPLUSSCTE20).
     #[serde(rename = "DestinationType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub destination_type: Option<String>,
@@ -471,6 +482,10 @@ pub struct CaptionDestinationSettings {
     #[serde(rename = "EmbeddedDestinationSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub embedded_destination_settings: Option<EmbeddedDestinationSettings>,
+    /// Settings specific to IMSC caption outputs.
+    #[serde(rename = "ImscDestinationSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub imsc_destination_settings: Option<ImscDestinationSettings>,
     /// Settings for SCC caption output.
#[serde(rename = "SccDestinationSettings")] #[serde(skip_serializing_if = "Option::is_none")] @@ -496,13 +511,13 @@ pub struct CaptionSelector { #[serde(rename = "LanguageCode")] #[serde(skip_serializing_if = "Option::is_none")] pub language_code: Option, - ///

Source settings (SourceSettings) contains the group of settings for captions in the input.

+ ///

If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.

#[serde(rename = "SourceSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub source_settings: Option, } -///

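
// --- Example (not part of the diff) ---
// A sketch of a caption selector that points at an external SRT file, per the
// updated SourceSettings documentation above. The nested types come from this
// same module; the S3 URI is hypothetical.
fn example_caption_selector() -> CaptionSelector {
    CaptionSelector {
        source_settings: Some(CaptionSourceSettings {
            file_source_settings: Some(FileSourceSettings {
                source_file: Some("s3://example-bucket/captions/english.srt".to_owned()),
                ..Default::default()
            }),
            ..Default::default()
        }),
        ..Default::default()
    }
}
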
-/// Source settings (SourceSettings) contains the group of settings for captions in the input.
+/// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct CaptionSourceSettings {
     /// Settings for ancillary captions source.

@@ -517,7 +532,7 @@ pub struct CaptionSourceSettings {
     #[serde(rename = "EmbeddedSourceSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub embedded_source_settings: Option<EmbeddedSourceSettings>,
-    /// Settings for File-based Captions in Source
+    /// If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.
     #[serde(rename = "FileSourceSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub file_source_settings: Option<FileSourceSettings>,
@@ -529,7 +544,7 @@ pub struct CaptionSourceSettings {
     #[serde(rename = "TeletextSourceSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub teletext_source_settings: Option<TeletextSourceSettings>,
-    /// Settings specific to caption sources that are specfied by track number. Sources include IMSC in IMF.
+    /// Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings.
#[serde(rename = "TrackSourceSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub track_source_settings: Option, @@ -551,19 +566,23 @@ pub struct CmafEncryptionSettings { #[serde(rename = "ConstantInitializationVector")] #[serde(skip_serializing_if = "Option::is_none")] pub constant_initialization_vector: Option, - ///

Encrypts the segments with the given encryption scheme. Leave blank to disable. Selecting 'Disabled' in the web interface also disables encryption.

+ ///

Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).

#[serde(rename = "EncryptionMethod")] #[serde(skip_serializing_if = "Option::is_none")] pub encryption_method: Option, - ///

The Initialization Vector is a 128-bit number used in conjunction with the key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed in the manifest. Otherwise Initialization Vector is not in the manifest.

+ ///

When you use DRM with CMAF outputs, choose whether the service writes the 128-bit encryption initialization vector in the HLS and DASH manifests.

#[serde(rename = "InitializationVectorInManifest")] #[serde(skip_serializing_if = "Option::is_none")] pub initialization_vector_in_manifest: Option, + ///

If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead.

+ #[serde(rename = "SpekeKeyProvider")] + #[serde(skip_serializing_if = "Option::is_none")] + pub speke_key_provider: Option, ///

Use these settings to set up encryption with a static key provider.

#[serde(rename = "StaticKeyProvider")] #[serde(skip_serializing_if = "Option::is_none")] pub static_key_provider: Option, - ///

Indicates which type of key provider is used for encryption.

+ ///

Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.

#[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -645,7 +664,7 @@ pub struct ColorCorrector { #[serde(rename = "Brightness")] #[serde(skip_serializing_if = "Option::is_none")] pub brightness: Option, - ///

Determines if colorspace conversion will be performed. If set to None, no conversion will be performed. If Force 601 or Force 709 are selected, conversion will be performed for inputs with differing colorspaces. An input's colorspace can be specified explicitly in the "Video Selector":#inputs-video_selector if necessary.

+ ///

Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, and from SDR to HDR. The service doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output.

#[serde(rename = "ColorSpaceConversion")] #[serde(skip_serializing_if = "Option::is_none")] pub color_space_conversion: Option, @@ -653,7 +672,7 @@ pub struct ColorCorrector { #[serde(rename = "Contrast")] #[serde(skip_serializing_if = "Option::is_none")] pub contrast: Option, - ///

Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. Note that these settings are not color correction.

+ ///

Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. When you set Color space conversion (ColorSpaceConversion) to HDR 10 (FORCE_HDR10), these settings are required. You must set values for Max frame average light level (maxFrameAverageLightLevel) and Max content light level (maxContentLightLevel); these settings don't have a default value. The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.

#[serde(rename = "Hdr10Metadata")] #[serde(skip_serializing_if = "Option::is_none")] pub hdr_10_metadata: Option, @@ -690,7 +709,7 @@ pub struct ContainerSettings { #[serde(rename = "MovSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub mov_settings: Option, - ///

Settings for MP4 Container

+ ///

Settings for MP4 container. You can create audio-only AAC outputs with this container.

#[serde(rename = "Mp4Settings")] #[serde(skip_serializing_if = "Option::is_none")] pub mp_4_settings: Option, @@ -714,6 +733,10 @@ pub struct CreateJobRequest { #[serde(rename = "JobTemplate")] #[serde(skip_serializing_if = "Option::is_none")] pub job_template: Option, + ///

Specify the relative priority for this job. In any given queue, the service begins processing the job with the highest value first. When more than one job has the same priority, the service begins processing the job that you submitted first. If you don't specify a priority, the service uses the default value 0.

+ #[serde(rename = "Priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, ///

Optional. When you create a job, you can specify a queue to send it to. If you don't specify, the job will go to the default queue. For more about queues, see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html.

#[serde(rename = "Queue")] #[serde(skip_serializing_if = "Option::is_none")] @@ -724,6 +747,10 @@ pub struct CreateJobRequest { ///

JobSettings contains all the transcode settings for a job.

#[serde(rename = "Settings")] pub settings: JobSettings, + ///

Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default.

+ #[serde(rename = "SimulateReservedQueue")] + #[serde(skip_serializing_if = "Option::is_none")] + pub simulate_reserved_queue: Option, ///

Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error.

#[serde(rename = "StatusUpdateInterval")] #[serde(skip_serializing_if = "Option::is_none")] @@ -735,7 +762,7 @@ pub struct CreateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobResponse { ///

Each job converts an input file into an output file or files. For more information, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html

#[serde(rename = "Job")] @@ -760,6 +787,10 @@ pub struct CreateJobTemplateRequest { ///

The name of the job template you are creating.

#[serde(rename = "Name")] pub name: String, + ///

Specify the relative priority for this job. In any given queue, the service begins processing the job with the highest value first. When more than one job has the same priority, the service begins processing the job that you submitted first. If you don't specify a priority, the service uses the default value 0.

+ #[serde(rename = "Priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, ///

Optional. The queue that jobs created from this template are assigned to. If you don't specify this, jobs will go to the default queue.

#[serde(rename = "Queue")] #[serde(skip_serializing_if = "Option::is_none")] @@ -778,7 +809,7 @@ pub struct CreateJobTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobTemplateResponse { ///

A job template is a pre-made set of encoding instructions that you can use to quickly create a job.

#[serde(rename = "JobTemplate")] @@ -809,7 +840,7 @@ pub struct CreatePresetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePresetResponse { ///

A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process.

#[serde(rename = "Preset")] @@ -834,6 +865,10 @@ pub struct CreateQueueRequest { #[serde(rename = "ReservationPlanSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub reservation_plan_settings: Option, + ///

Initial state of the queue. If you create a paused queue, then jobs in that queue won't begin.

+ #[serde(rename = "Status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, ///

The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] @@ -841,7 +876,7 @@ pub struct CreateQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateQueueResponse { ///

You can use queues to manage the resources that are available to your AWS account for running multiple transcoding jobs at the same time. If you don't specify a queue, the service sends all jobs through the default queue. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.

#[serde(rename = "Queue")] @@ -856,7 +891,7 @@ pub struct DashIsoEncryptionSettings { #[serde(rename = "PlaybackDeviceCompatibility")] #[serde(skip_serializing_if = "Option::is_none")] pub playback_device_compatibility: Option, - ///

Settings for use with a SPEKE key provider

+ ///

If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.

#[serde(rename = "SpekeKeyProvider")] #[serde(skip_serializing_if = "Option::is_none")] pub speke_key_provider: Option, @@ -934,7 +969,7 @@ pub struct DeleteJobTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteJobTemplateResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -945,7 +980,7 @@ pub struct DeletePresetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePresetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -956,7 +991,7 @@ pub struct DeleteQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteQueueResponse {} ///

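
// --- Example (not part of the diff) ---
// A sketch of endpoint discovery with the DescribeEndpoints request/response
// pair below. The blocking `.sync()` call reflects the RusotoFuture style of
// this era of rusoto's generated clients; region and error handling are
// illustrative, not prescriptive.
fn example_describe_endpoints() {
    use rusoto_core::Region;
    let client = MediaConvertClient::new(Region::UsEast1);
    let request = DescribeEndpointsRequest::default();
    match client.describe_endpoints(request).sync() {
        Ok(response) => {
            // Each endpoint is account-specific; subsequent MediaConvert
            // API calls should target this URL.
            for endpoint in response.endpoints.unwrap_or_default() {
                println!("MediaConvert endpoint: {:?}", endpoint.url);
            }
        }
        Err(err) => eprintln!("describe_endpoints failed: {}", err),
    }
}
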
 /// DescribeEndpointsRequest

@@ -977,7 +1012,7 @@ pub struct DescribeEndpointsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointsResponse { ///

List of endpoints

#[serde(rename = "Endpoints")] @@ -1006,7 +1041,7 @@ pub struct DisassociateCertificateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateCertificateResponse {} ///

Inserts DVB Network Information Table (NIT) at the specified table repetition interval.

@@ -1142,6 +1177,75 @@ pub struct DvbTdtSettings {
     pub tdt_interval: Option<i64>,
 }
 
+/// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Eac3AtmosSettings {
+    /// Specify the average bitrate in bits per second.
+    /// Valid values: 384k, 448k, 640k, 768k
+    #[serde(rename = "Bitrate")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bitrate: Option<i64>,
+    /// Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).
+    #[serde(rename = "BitstreamMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bitstream_mode: Option<String>,
+    /// The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6 (CODINGMODE916).
+    #[serde(rename = "CodingMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub coding_mode: Option<String>,
+    /// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis.
+    #[serde(rename = "DialogueIntelligence")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dialogue_intelligence: Option<String>,
+    /// Specify the absolute peak level for a signal with dynamic range compression.
+    #[serde(rename = "DynamicRangeCompressionLine")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dynamic_range_compression_line: Option<String>,
+    /// Specify how the service limits the audio dynamic range when compressing the audio.
+    #[serde(rename = "DynamicRangeCompressionRf")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dynamic_range_compression_rf: Option<String>,
+    /// Specify a value for the following Dolby Atmos setting: Left only/Right only center mix
+    /// (Lo/Ro center). MediaConvert uses this value for downmixing. How the service uses this
+    /// value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix).
+    /// Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0.
+    #[serde(rename = "LoRoCenterMixLevel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lo_ro_center_mix_level: Option<f64>,
+    /// Specify a value for the following Dolby Atmos setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel.
+    #[serde(rename = "LoRoSurroundMixLevel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lo_ro_surround_mix_level: Option<f64>,
+    /// Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0.
+    #[serde(rename = "LtRtCenterMixLevel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lt_rt_center_mix_level: Option<f64>,
+    /// Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel.
+    #[serde(rename = "LtRtSurroundMixLevel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lt_rt_surround_mix_level: Option<f64>,
+    /// Choose how the service meters the loudness of your audio.
+    #[serde(rename = "MeteringMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub metering_mode: Option<String>,
+    /// This value is always 48000. It represents the sample rate in Hz.
+    #[serde(rename = "SampleRate")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sample_rate: Option<i64>,
+    /// Specify the percentage of audio content that must be speech before the encoder uses the measured speech loudness as the overall program loudness.
+    #[serde(rename = "SpeechThreshold")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub speech_threshold: Option<i64>,
+    /// Choose how the service does stereo downmixing.
+    #[serde(rename = "StereoDownmix")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stereo_downmix: Option<String>,
+    /// Specify whether your input audio has an additional center rear surround channel matrix encoded into your left and right surround channels.
+    #[serde(rename = "SurroundExMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub surround_ex_mode: Option<String>,
+}
+

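
// --- Example (not part of the diff) ---
// A sketch of the new Dolby Digital Plus JOC (Atmos) settings. Types follow
// the conventions of the rest of this module (numbers as Option<i64>/
// Option<f64>, enum-style values as strings) and are assumptions.
fn example_eac3_atmos() -> Eac3AtmosSettings {
    Eac3AtmosSettings {
        // 768 kbit/s, one of the documented valid bitrates.
        bitrate: Some(768_000),
        // The coding mode for Atmos is always 9.1.6.
        coding_mode: Some("CODING_MODE_9_1_6".to_owned()),
        // The sample rate is always 48000 Hz.
        sample_rate: Some(48_000),
        ..Default::default()
    }
}
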
 /// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Eac3Settings { @@ -1149,11 +1253,11 @@ pub struct Eac3Settings { #[serde(rename = "AttenuationControl")] #[serde(skip_serializing_if = "Option::is_none")] pub attenuation_control: Option, - ///

Average bitrate in bits/second. Valid bitrates depend on the coding mode.

+ ///

Specify the average bitrate in bits per second. Valid bitrates depend on the coding mode.

#[serde(rename = "Bitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub bitrate: Option, - ///

Specifies the "Bitstream Mode" (bsmod) for the emitted E-AC-3 stream. See ATSC A/52-2012 (Annex E) for background on these values.

+ ///

Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).

#[serde(rename = "BitstreamMode")] #[serde(skip_serializing_if = "Option::is_none")] pub bitstream_mode: Option, @@ -1169,11 +1273,11 @@ pub struct Eac3Settings { #[serde(rename = "Dialnorm")] #[serde(skip_serializing_if = "Option::is_none")] pub dialnorm: Option, - ///

Enables Dynamic Range Compression that restricts the absolute peak level for a signal.

+ ///

Specify the absolute peak level for a signal with dynamic range compression.

#[serde(rename = "DynamicRangeCompressionLine")] #[serde(skip_serializing_if = "Option::is_none")] pub dynamic_range_compression_line: Option, - ///

Enables Heavy Dynamic Range Compression, ensures that the instantaneous signal peaks do not exceed specified levels.

+ ///

Specify how the service limits the audio dynamic range when compressing the audio.

#[serde(rename = "DynamicRangeCompressionRf")] #[serde(skip_serializing_if = "Option::is_none")] pub dynamic_range_compression_rf: Option, @@ -1185,23 +1289,19 @@ pub struct Eac3Settings { #[serde(rename = "LfeFilter")] #[serde(skip_serializing_if = "Option::is_none")] pub lfe_filter: Option, - ///

Left only/Right only center mix level. Only used for 3/2 coding mode. - /// Valid values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60

+ ///

Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODINGMODE3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel).

#[serde(rename = "LoRoCenterMixLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub lo_ro_center_mix_level: Option, - ///

Left only/Right only surround mix level. Only used for 3/2 coding mode. - /// Valid values: -1.5 -3.0 -4.5 -6.0 -60

+ ///

Specify a value for the following Dolby Digital Plus setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODINGMODE3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only surround (loRoSurroundMixLevel).

#[serde(rename = "LoRoSurroundMixLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub lo_ro_surround_mix_level: Option, - ///

Left total/Right total center mix level. Only used for 3/2 coding mode. - /// Valid values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60

+ ///

Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODINGMODE3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel).

#[serde(rename = "LtRtCenterMixLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub lt_rt_center_mix_level: Option, - ///

Left total/Right total surround mix level. Only used for 3/2 coding mode. - /// Valid values: -1.5 -3.0 -4.5 -6.0 -60

+ ///

Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODINGMODE3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel).

#[serde(rename = "LtRtSurroundMixLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub lt_rt_surround_mix_level: Option, @@ -1217,11 +1317,11 @@ pub struct Eac3Settings { #[serde(rename = "PhaseControl")] #[serde(skip_serializing_if = "Option::is_none")] pub phase_control: Option, - ///

Sample rate in hz. Sample rate is always 48000.

+ ///

This value is always 48000. It represents the sample rate in Hz.

#[serde(rename = "SampleRate")] #[serde(skip_serializing_if = "Option::is_none")] pub sample_rate: Option, - ///

Stereo downmix preference. Only used for 3/2 coding mode.

+ ///

Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODINGMODE3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).

#[serde(rename = "StereoDownmix")] #[serde(skip_serializing_if = "Option::is_none")] pub stereo_downmix: Option, @@ -1238,16 +1338,20 @@ pub struct Eac3Settings { ///

Settings specific to embedded/ancillary caption outputs, including 608/708 Channel destination number.

 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct EmbeddedDestinationSettings {
-    /// Ignore this setting unless your input captions are SCC format and your output container is MXF. With this combination of input captions format and output container, you can optionally use this setting to replace the input channel number with the track number that you specify. Specify a different number for each output captions track. If you don't specify an output track number, the system uses the input channel number for the output channel number. This setting applies to each output individually. You can optionally combine two captions channels in your output. The two output channel numbers can be one of the following pairs: 1,3; 2,4; 1,4; or 2,3.
+    /// Ignore this setting unless your input captions are SCC format and your output captions are embedded in the video stream. Specify a CC number for each captions channel in this output. If you have two channels, choose CC numbers that aren't in the same field. For example, choose 1 and 3. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
     #[serde(rename = "Destination608ChannelNumber")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub destination_608_channel_number: Option<i64>,
+    /// Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
+    #[serde(rename = "Destination708ServiceNumber")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination_708_service_number: Option<i64>,
 }

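
// --- Example (not part of the diff) ---
// A sketch of embedding an SCC-sourced caption channel per the documentation
// above: CC number 1 with a matching 708 service number. The Option<i64>
// field types are assumptions consistent with this module's conventions.
fn example_embedded_destination() -> EmbeddedDestinationSettings {
    EmbeddedDestinationSettings {
        destination_608_channel_number: Some(1),
        destination_708_service_number: Some(1),
    }
}
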
 /// Settings for embedded captions Source

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EmbeddedSourceSettings { - ///

When set to UPCONVERT, 608 data is both passed through via the "608 compatibility bytes" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.

+ ///

Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.

#[serde(rename = "Convert608To708")] #[serde(skip_serializing_if = "Option::is_none")] pub convert_608_to_708: Option, @@ -1259,11 +1363,15 @@ pub struct EmbeddedSourceSettings { #[serde(rename = "Source608TrackNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub source_608_track_number: Option, + ///

By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting.

+ #[serde(rename = "TerminateCaptions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub terminate_captions: Option, } ///

Describes an account-specific API endpoint.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Endpoint { ///

URL of endpoint

#[serde(rename = "Url")] @@ -1300,7 +1408,7 @@ pub struct EsamSettings { ///

ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EsamSignalProcessingNotification { - ///

Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. If you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both.

+ ///

Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both.

#[serde(rename = "SccXml")] #[serde(skip_serializing_if = "Option::is_none")] pub scc_xml: Option, @@ -1328,14 +1436,14 @@ pub struct FileGroupSettings { pub destination_settings: Option, } -///

Settings for File-based Captions in Source

+///

If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct FileSourceSettings { - ///

If set to UPCONVERT, 608 caption data is both passed through via the "608 compatibility bytes" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.

+ ///

Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.

#[serde(rename = "Convert608To708")] #[serde(skip_serializing_if = "Option::is_none")] pub convert_608_to_708: Option, - ///

External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'.

+ ///

External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'.

#[serde(rename = "SourceFile")] #[serde(skip_serializing_if = "Option::is_none")] pub source_file: Option, @@ -1374,7 +1482,7 @@ pub struct GetJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobResponse { ///

Each job converts an input file into an output file or files. For more information, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html

#[serde(rename = "Job")] @@ -1390,7 +1498,7 @@ pub struct GetJobTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobTemplateResponse { ///

A job template is a pre-made set of encoding instructions that you can use to quickly create a job.

#[serde(rename = "JobTemplate")] @@ -1406,7 +1514,7 @@ pub struct GetPresetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPresetResponse { ///

A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process.

#[serde(rename = "Preset")] @@ -1422,7 +1530,7 @@ pub struct GetQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQueueResponse { ///

You can use queues to manage the resources that are available to your AWS account for running multiple transcoding jobs at the same time. If you don't specify a queue, the service sends all jobs through the default queue. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.

#[serde(rename = "Queue")] @@ -1433,7 +1541,7 @@ pub struct GetQueueResponse { ///

Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct H264QvbrSettings { - ///

Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value you choose. That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output.

+ ///

Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value that you choose. That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output.

#[serde(rename = "MaxAverageBitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub max_average_bitrate: Option, @@ -1450,7 +1558,7 @@ pub struct H264Settings { #[serde(rename = "AdaptiveQuantization")] #[serde(skip_serializing_if = "Option::is_none")] pub adaptive_quantization: Option, - ///

Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.

+ ///

Specify the average bitrate in bits per second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.

#[serde(rename = "Bitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub bitrate: Option, @@ -1568,7 +1676,7 @@ pub struct H264Settings { #[serde(rename = "RepeatPps")] #[serde(skip_serializing_if = "Option::is_none")] pub repeat_pps: Option, - ///

Scene change detection (inserts I-frames on scene changes).

+ ///

Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.

#[serde(rename = "SceneChangeDetect")] #[serde(skip_serializing_if = "Option::is_none")] pub scene_change_detect: Option, @@ -1609,7 +1717,7 @@ pub struct H264Settings { ///

Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct H265QvbrSettings { - ///

Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value you choose. That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output.

+ ///

Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value that you choose. That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output.

#[serde(rename = "MaxAverageBitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub max_average_bitrate: Option, @@ -1630,7 +1738,7 @@ pub struct H265Settings { #[serde(rename = "AlternateTransferFunctionSei")] #[serde(skip_serializing_if = "Option::is_none")] pub alternate_transfer_function_sei: Option, - ///

Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.

+ ///

Specify the average bitrate in bits per second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.

#[serde(rename = "Bitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub bitrate: Option, @@ -1690,9 +1798,7 @@ pub struct H265Settings { #[serde(rename = "HrdBufferSize")] #[serde(skip_serializing_if = "Option::is_none")] pub hrd_buffer_size: Option, - ///

Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOPFIELD) and Bottom Field First (BOTTOMFIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOWTOPFIELD) and Follow, Default Bottom (FOLLOWBOTTOMFIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type. - /// - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of "top field first" and "bottom field first". - /// - If the source is progressive, the output will be interlaced with "top field first" or "bottom field first" polarity, depending on which of the Follow options you chose.

+ ///

Choose the scan line type for the output. Choose Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Choose Top Field First (TOPFIELD) or Bottom Field First (BOTTOMFIELD) to create an output that's interlaced with the same field polarity throughout. Choose Follow, Default Top (FOLLOWTOPFIELD) or Follow, Default Bottom (FOLLOWBOTTOMFIELD) to create an interlaced output with the same field polarity as the source. If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of "top field first" and "bottom field first". If the source is progressive, your output will be interlaced with "top field first" or "bottom field first" polarity, depending on which of the Follow options you chose. If you don't choose a value, the service will default to Progressive (PROGRESSIVE).

#[serde(rename = "InterlaceMode")] #[serde(skip_serializing_if = "Option::is_none")] pub interlace_mode: Option, @@ -1740,7 +1846,7 @@ pub struct H265Settings { #[serde(rename = "SampleAdaptiveOffsetFilterMode")] #[serde(skip_serializing_if = "Option::is_none")] pub sample_adaptive_offset_filter_mode: Option, - ///

Scene change detection (inserts I-frames on scene changes).

+ ///

Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.

#[serde(rename = "SceneChangeDetect")] #[serde(skip_serializing_if = "Option::is_none")] pub scene_change_detect: Option, @@ -1782,7 +1888,7 @@ pub struct H265Settings { pub write_mp_4_packaging_type: Option, } -///

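
// --- Example (not part of the diff) ---
// A sketch of the two HDR 10 light-level values that, per the updated docs,
// have no defaults and must be set when forcing HDR 10 output. The numbers
// are illustrative nits values, not recommendations; Option<i64> is assumed.
fn example_hdr10_light_levels() -> Hdr10Metadata {
    Hdr10Metadata {
        max_content_light_level: Some(1_000),
        max_frame_average_light_level: Some(400),
        ..Default::default()
    }
}
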
Use the "HDR master display information" (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000; each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the following settings. Set "MP4 packaging type" (writeMp4PackagingType) to HVC1 (HVC1). Set "Profile" (H265Settings > codecProfile) to Main10/High (MAIN10HIGH). Set "Level" (H265Settings > codecLevel) to 5 (LEVEL5).

+///

Use these settings to specify static color calibration metadata, as defined by SMPTE ST 2086. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Hdr10Metadata { ///

HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.

@@ -1801,11 +1907,11 @@ pub struct Hdr10Metadata { #[serde(rename = "GreenPrimaryY")] #[serde(skip_serializing_if = "Option::is_none")] pub green_primary_y: Option, - ///

Maximum light level among all samples in the coded video sequence, in units of candelas per square meter.

+ ///

Maximum light level among all samples in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.

#[serde(rename = "MaxContentLightLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub max_content_light_level: Option, - ///

Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter.

+ ///

Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.

#[serde(rename = "MaxFrameAverageLightLevel")] #[serde(skip_serializing_if = "Option::is_none")] pub max_frame_average_light_level: Option, @@ -1875,7 +1981,7 @@ pub struct HlsEncryptionSettings { #[serde(rename = "OfflineEncrypted")] #[serde(skip_serializing_if = "Option::is_none")] pub offline_encrypted: Option, - ///

Settings for use with a SPEKE key provider

+ ///

If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.

#[serde(rename = "SpekeKeyProvider")] #[serde(skip_serializing_if = "Option::is_none")] pub speke_key_provider: Option, @@ -1883,7 +1989,7 @@ pub struct HlsEncryptionSettings { #[serde(rename = "StaticKeyProvider")] #[serde(skip_serializing_if = "Option::is_none")] pub static_key_provider: Option, - ///

Indicates which type of key provider is used for encryption.

+ ///

Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.

#[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -1892,7 +1998,7 @@ pub struct HlsEncryptionSettings { ///

Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to HLSGROUPSETTINGS.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct HlsGroupSettings { - ///

Choose one or more ad marker types to pass SCTE35 signals through to this group of Apple HLS outputs.

+ ///

Choose one or more ad marker types to decorate your Apple HLS manifest. This setting does not determine whether SCTE-35 markers appear in the outputs themselves.

#[serde(rename = "AdMarkers")] #[serde(skip_serializing_if = "Option::is_none")] pub ad_markers: Option>, @@ -1997,6 +2103,10 @@ pub struct HlsSettings { #[serde(rename = "AudioGroupId")] #[serde(skip_serializing_if = "Option::is_none")] pub audio_group_id: Option, + ///

Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container.

+ #[serde(rename = "AudioOnlyContainer")] + #[serde(skip_serializing_if = "Option::is_none")] + pub audio_only_container: Option, ///

List all the audio groups that are used with the video output stream. Input all the audio GROUP-IDs that are associated to the video, separate by ','.

#[serde(rename = "AudioRenditionSets")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2037,6 +2147,15 @@ pub struct ImageInserter { pub insertable_images: Option>, } +///

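
// --- Example (not part of the diff) ---
// A sketch of the new audio-only container choice above: write an audio-only
// HLS rendition into an MPEG2-TS container instead of a raw file. The string
// value mirrors the documented M2TS option; the group id is hypothetical.
fn example_audio_only_hls() -> HlsSettings {
    HlsSettings {
        audio_group_id: Some("program_audio".to_owned()),
        audio_only_container: Some("M2TS".to_owned()),
        ..Default::default()
    }
}
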
+/// Settings specific to IMSC caption outputs.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ImscDestinationSettings {
+    /// Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions.
+    #[serde(rename = "StylePassthrough")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub style_passthrough: Option<String>,
+}
+

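
// --- Example (not part of the diff) ---
// A sketch tying the new IMSC destination type together: the destination type
// string and the style-passthrough flag follow the documented values; both
// are assumed to be Option<String>, as elsewhere in this module.
fn example_imsc_destination() -> CaptionDestinationSettings {
    CaptionDestinationSettings {
        destination_type: Some("IMSC".to_owned()),
        imsc_destination_settings: Some(ImscDestinationSettings {
            // Keep source font style and position information.
            style_passthrough: Some("ENABLED".to_owned()),
        }),
        ..Default::default()
    }
}
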
 /// Specifies media input

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Input { @@ -2052,6 +2171,10 @@ pub struct Input { #[serde(rename = "CaptionSelectors")] #[serde(skip_serializing_if = "Option::is_none")] pub caption_selectors: Option<::std::collections::HashMap>, + ///

Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop).

+ #[serde(rename = "Crop")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crop: Option, ///

Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.

#[serde(rename = "DeblockFilter")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2084,6 +2207,10 @@ pub struct Input { #[serde(rename = "InputClippings")] #[serde(skip_serializing_if = "Option::is_none")] pub input_clippings: Option>, + ///

Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior).

+ #[serde(rename = "Position")] + #[serde(skip_serializing_if = "Option::is_none")] + pub position: Option, ///

Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.

#[serde(rename = "ProgramNumber")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2096,10 +2223,14 @@ pub struct Input { #[serde(rename = "SupplementalImps")] #[serde(skip_serializing_if = "Option::is_none")] pub supplemental_imps: Option>, - ///

Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes.

+ ///

Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.

#[serde(rename = "TimecodeSource")] #[serde(skip_serializing_if = "Option::is_none")] pub timecode_source: Option, + ///

Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.

+ #[serde(rename = "TimecodeStart")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timecode_start: Option, ///

Selector for video.

#[serde(rename = "VideoSelector")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2155,6 +2286,10 @@ pub struct InputTemplate { #[serde(rename = "CaptionSelectors")] #[serde(skip_serializing_if = "Option::is_none")] pub caption_selectors: Option<::std::collections::HashMap>, + ///

Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop).

+ #[serde(rename = "Crop")] + #[serde(skip_serializing_if = "Option::is_none")] + pub crop: Option, ///

Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.

#[serde(rename = "DeblockFilter")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2179,6 +2314,10 @@ pub struct InputTemplate { #[serde(rename = "InputClippings")] #[serde(skip_serializing_if = "Option::is_none")] pub input_clippings: Option>, + ///

Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior).

+ #[serde(rename = "Position")] + #[serde(skip_serializing_if = "Option::is_none")] + pub position: Option, ///

Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.

#[serde(rename = "ProgramNumber")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2187,10 +2326,14 @@ pub struct InputTemplate { #[serde(rename = "PsiControl")] #[serde(skip_serializing_if = "Option::is_none")] pub psi_control: Option, - ///

Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes.

+ ///

Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.

#[serde(rename = "TimecodeSource")] #[serde(skip_serializing_if = "Option::is_none")] pub timecode_source: Option, + ///

Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.

+ #[serde(rename = "TimecodeStart")] + #[serde(skip_serializing_if = "Option::is_none")] + pub timecode_start: Option, ///

Selector for video.

#[serde(rename = "VideoSelector")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2248,7 +2391,7 @@ pub struct InsertableImage { ///

Each job converts an input file into an output file or files. For more information, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Job { ///

Accelerated transcoding can significantly speed up jobs with long, visually complex content.

#[serde(rename = "AccelerationSettings")] @@ -2282,7 +2425,7 @@ pub struct Job { #[serde(rename = "Id")] #[serde(skip_serializing_if = "Option::is_none")] pub id: Option, - ///

An estimate of how far your job has progressed. This estimate is shown as a percentage of the total time from when your job leaves its queue to when your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the response to GetJob and ListJobs requests. The jobPercentComplete estimate is reliable for the following input containers: Quicktime, Transport Stream, MP4, and MXF. For some jobs, including audio-only jobs and jobs that use input clipping, the service can't provide information about job progress. In those cases, jobPercentComplete returns a null value.

+ ///

An estimate of how far your job has progressed. This estimate is shown as a percentage of the total time from when your job leaves its queue to when your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the response to GetJob and ListJobs requests. The jobPercentComplete estimate is reliable for the following input containers: Quicktime, Transport Stream, MP4, and MXF. For some jobs, the service can't provide information about job progress. In those cases, jobPercentComplete returns a null value.

#[serde(rename = "JobPercentComplete")] #[serde(skip_serializing_if = "Option::is_none")] pub job_percent_complete: Option, @@ -2294,6 +2437,10 @@ pub struct Job { #[serde(rename = "OutputGroupDetails")] #[serde(skip_serializing_if = "Option::is_none")] pub output_group_details: Option>, + ///

Relative priority on the job.

+ #[serde(rename = "Priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, ///

Optional. When you create a job, you can specify a queue to send it to. If you don't specify, the job will go to the default queue. For more about queues, see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html

#[serde(rename = "Queue")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2308,6 +2455,10 @@ pub struct Job { ///

JobSettings contains all the transcode settings for a job.

#[serde(rename = "Settings")] pub settings: JobSettings, + ///

Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default.

+ #[serde(rename = "SimulateReservedQueue")] + #[serde(skip_serializing_if = "Option::is_none")] + pub simulate_reserved_queue: Option, ///

A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.

#[serde(rename = "Status")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2349,7 +2500,7 @@ pub struct JobSettings { #[serde(rename = "MotionImageInserter")] #[serde(skip_serializing_if = "Option::is_none")] pub motion_image_inserter: Option, - ///

Settings for Nielsen Configuration

+ ///

Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting.

#[serde(rename = "NielsenConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub nielsen_configuration: Option, @@ -2369,9 +2520,9 @@ pub struct JobSettings { ///

A job template is a pre-made set of encoding instructions that you can use to quickly create a job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobTemplate { - ///

Accelerated transcoding is currently in private preview. Contact AWS for more information.

+ ///

Accelerated transcoding can significantly speed up jobs with long, visually complex content.

#[serde(rename = "AccelerationSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub acceleration_settings: Option, @@ -2398,6 +2549,10 @@ pub struct JobTemplate { ///

A name you create for each job template. Each name must be unique within your account.

#[serde(rename = "Name")] pub name: String, + ///

Relative priority on the job.

+ #[serde(rename = "Priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, ///

Optional. The queue that jobs created from this template are assigned to. If you don't specify this, jobs will go to the default queue.

#[serde(rename = "Queue")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2438,7 +2593,7 @@ pub struct JobTemplateSettings { #[serde(rename = "MotionImageInserter")] #[serde(skip_serializing_if = "Option::is_none")] pub motion_image_inserter: Option, - ///

Settings for Nielsen Configuration

+ ///

Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting.

#[serde(rename = "NielsenConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub nielsen_configuration: Option, @@ -2481,7 +2636,7 @@ pub struct ListJobTemplatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobTemplatesResponse { ///

List of Job templates.

#[serde(rename = "JobTemplates")] @@ -2518,7 +2673,7 @@ pub struct ListJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResponse { ///

List of jobs

#[serde(rename = "Jobs")] @@ -2555,7 +2710,7 @@ pub struct ListPresetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPresetsResponse { ///

Use this string to request the next batch of presets.

#[serde(rename = "NextToken")] @@ -2588,7 +2743,7 @@ pub struct ListQueuesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListQueuesResponse { ///

Use this string to request the next batch of queues.

#[serde(rename = "NextToken")] @@ -2608,7 +2763,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert resource.

#[serde(rename = "ResourceTags")] @@ -2744,7 +2899,7 @@ pub struct M2tsSettings { #[serde(rename = "Scte35Pid")] #[serde(skip_serializing_if = "Option::is_none")] pub scte_35_pid: Option, - ///

Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output.

+ ///

For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).

#[serde(rename = "Scte35Source")] #[serde(skip_serializing_if = "Option::is_none")] pub scte_35_source: Option, @@ -2821,7 +2976,7 @@ pub struct M3u8Settings { #[serde(rename = "Scte35Pid")] #[serde(skip_serializing_if = "Option::is_none")] pub scte_35_pid: Option, - ///

Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output.

+ ///

For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml).

#[serde(rename = "Scte35Source")] #[serde(skip_serializing_if = "Option::is_none")] pub scte_35_source: Option, @@ -2926,7 +3081,7 @@ pub struct MovSettings { ///

Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Mp2Settings { - ///

Average bitrate in bits/second.

+ ///

Specify the average bitrate in bits per second.

#[serde(rename = "Bitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub bitrate: Option, @@ -2940,7 +3095,7 @@ pub struct Mp2Settings { pub sample_rate: Option, } -///

Settings for MP4 Container

+///

Settings for MP4 container. You can create audio-only AAC outputs with this container.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Mp4Settings { /// When enabled, file composition times will start at zero, composition times in the 'ctts' (composition time to sample) box for B-frames will be negative, and a 'cslg' (composition shift least greatest) box will be included per 14496-1 amendment 1. This improves compatibility with Apple players and tools.
@@ -2968,7 +3123,7 @@ pub struct Mpeg2Settings { #[serde(rename = "AdaptiveQuantization")] #[serde(skip_serializing_if = "Option::is_none")] pub adaptive_quantization: Option, - /// Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.
+ /// Specify the average bitrate in bits per second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000.
#[serde(rename = "Bitrate")] #[serde(skip_serializing_if = "Option::is_none")] pub bitrate: Option, @@ -3062,7 +3217,7 @@ pub struct Mpeg2Settings { #[serde(rename = "RateControlMode")] #[serde(skip_serializing_if = "Option::is_none")] pub rate_control_mode: Option, - /// Scene change detection (inserts I-frames on scene changes).
+ /// Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default.
#[serde(rename = "SceneChangeDetect")] #[serde(skip_serializing_if = "Option::is_none")] pub scene_change_detect: Option, @@ -3095,7 +3250,7 @@ pub struct Mpeg2Settings { /// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MsSmoothEncryptionSettings { - /// Settings for use with a SPEKE key provider
+ /// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
#[serde(rename = "SpekeKeyProvider")] #[serde(skip_serializing_if = "Option::is_none")] pub speke_key_provider: Option, @@ -3130,10 +3285,10 @@ pub struct MsSmoothGroupSettings { pub manifest_encoding: Option, } -/// Settings for Nielsen Configuration
+/// Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NielsenConfiguration { - /// Use Nielsen Configuration (NielsenConfiguration) to set the Nielsen measurement system breakout code. Supported values are 0, 3, 7, and 9.
+ /// Nielsen has discontinued the use of breakout code functionality. If you must include this property, set the value to zero.
#[serde(rename = "BreakoutCode")] #[serde(skip_serializing_if = "Option::is_none")] pub breakout_code: Option, @@ -3146,7 +3301,7 @@ pub struct NielsenConfiguration { ///

Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default. When you enable Noise reducer (NoiseReducer), you must also select a value for Noise reducer filter (NoiseReducerFilter).

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NoiseReducer { - ///

Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction filter. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution filters. * Conserve is a min/max noise reduction filter. * Spatial is a frequency-domain filter based on JND principles.

+ ///

Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion.

#[serde(rename = "Filter")] #[serde(skip_serializing_if = "Option::is_none")] pub filter: Option, @@ -3158,6 +3313,10 @@ pub struct NoiseReducer { #[serde(rename = "SpatialFilterSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub spatial_filter_settings: Option, + ///

Noise reducer filter settings for temporal filter.

+ #[serde(rename = "TemporalFilterSettings")] + #[serde(skip_serializing_if = "Option::is_none")] + pub temporal_filter_settings: Option, } ///

Settings for a noise reducer filter

@@ -3186,6 +3345,23 @@ pub struct NoiseReducerSpatialFilterSettings { pub strength: Option, } +///

Noise reducer filter settings for temporal filter.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NoiseReducerTemporalFilterSettings { + ///

Use Aggressive mode for content that has complex motion. Higher values produce stronger temporal filtering. This filters highly complex scenes more aggressively and creates better VQ for low bitrate outputs.

+ #[serde(rename = "AggressiveMode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub aggressive_mode: Option, + ///

The speed of the filter (higher number is faster). Low setting reduces bit rate at the cost of transcode time, high setting improves transcode time at the cost of bit rate.

+ #[serde(rename = "Speed")] + #[serde(skip_serializing_if = "Option::is_none")] + pub speed: Option, + ///

Specify the strength of the noise reducing filter on this output. Higher values produce stronger filtering. We recommend the following value ranges, depending on the result that you want: * 0-2 for complexity reduction with minimal sharpness loss * 2-8 for complexity reduction with image preservation * 8-16 for a high level of complexity reduction

+ #[serde(rename = "Strength")] + #[serde(skip_serializing_if = "Option::is_none")] + pub strength: Option, +} + ///

An output object describes the settings for a single output file or stream in an output group.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Output { @@ -3217,7 +3393,7 @@ pub struct Output { #[serde(rename = "Preset")] #[serde(skip_serializing_if = "Option::is_none")] pub preset: Option, - /// (VideoDescription) contains a group of video encoding settings. The specific video settings depend on the video codec you choose when you specify a value for Video codec (codec). Include one instance of (VideoDescription) per output.
+ /// (VideoDescription) contains a group of video encoding settings. The specific video settings depend on the video codec that you choose when you specify a value for Video codec (codec). Include one instance of (VideoDescription) per output.
#[serde(rename = "VideoDescription")] #[serde(skip_serializing_if = "Option::is_none")] pub video_description: Option, @@ -3234,7 +3410,7 @@ pub struct OutputChannelMapping { /// Details regarding output
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutputDetail { /// Duration in milliseconds
#[serde(rename = "DurationInMs")] @@ -3269,7 +3445,7 @@ pub struct OutputGroup { /// Contains details about the output groups specified in the job settings.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutputGroupDetail { /// Details about the output
#[serde(rename = "OutputDetails")] @@ -3317,7 +3493,7 @@ pub struct OutputSettings { /// A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Preset { /// An identifier for this resource that is unique within all of AWS.
#[serde(rename = "Arn")] @@ -3366,7 +3542,7 @@ pub struct PresetSettings { #[serde(rename = "ContainerSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub container_settings: Option, - /// (VideoDescription) contains a group of video encoding settings. The specific video settings depend on the video codec you choose when you specify a value for Video codec (codec). Include one instance of (VideoDescription) per output.
+ /// (VideoDescription) contains a group of video encoding settings. The specific video settings depend on the video codec that you choose when you specify a value for Video codec (codec). Include one instance of (VideoDescription) per output.
#[serde(rename = "VideoDescription")] #[serde(skip_serializing_if = "Option::is_none")] pub video_description: Option, @@ -3425,7 +3601,7 @@ pub struct ProresSettings { /// You can use queues to manage the resources that are available to your AWS account for running multiple transcoding jobs at the same time. If you don't specify a queue, the service sends all jobs through the default queue. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Queue { /// An identifier for this resource that is unique within all of AWS.
#[serde(rename = "Arn")] @@ -3504,7 +3680,7 @@ pub struct RemixSettings { #[serde(rename = "ChannelsIn")] #[serde(skip_serializing_if = "Option::is_none")] pub channels_in: Option, - ///

Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8

+ ///

Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.)

#[serde(rename = "ChannelsOut")] #[serde(skip_serializing_if = "Option::is_none")] pub channels_out: Option, @@ -3512,7 +3688,7 @@ pub struct RemixSettings { ///

Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReservationPlan { ///

The length of the term of your reserved queue pricing plan commitment.

#[serde(rename = "Commitment")] @@ -3556,7 +3732,7 @@ pub struct ReservationPlanSettings { ///

The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceTags { ///

The Amazon Resource Name (ARN) of the resource.

#[serde(rename = "Arn")] @@ -3599,22 +3775,48 @@ pub struct SccDestinationSettings { pub framerate: Option, } -///

Settings for use with a SPEKE key provider

+///

If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SpekeKeyProvider { - /// Optional AWS Certificate Manager ARN for a certificate to send to the keyprovider. The certificate holds a key used by the keyprovider to encrypt the keys in its response.
+ /// If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here.
#[serde(rename = "CertificateArn")] #[serde(skip_serializing_if = "Option::is_none")] pub certificate_arn: Option, - /// The SPEKE-compliant server uses Resource ID (ResourceId) to identify content.
+ /// Specify the resource ID that your SPEKE-compliant key provider uses to identify this content.
#[serde(rename = "ResourceId")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_id: Option, - /// Relates to SPEKE implementation. DRM system identifiers. DASH output groups support a max of two system ids. Other group types support one system id.
+ /// Relates to SPEKE implementation. DRM system identifiers. DASH output groups support a max of two system ids. Other group types support one system id. See + /// https://dashif.org/identifiers/content_protection/ for more details.
#[serde(rename = "SystemIds")] #[serde(skip_serializing_if = "Option::is_none")] pub system_ids: Option>, - /// Use URL (Url) to specify the SPEKE-compliant server that will provide keys for content.
+ /// Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.
+ #[serde(rename = "Url")] + #[serde(skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SpekeKeyProviderCmaf { + /// If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here.
+ #[serde(rename = "CertificateArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub certificate_arn: Option, + /// Specify the DRM system IDs that you want signaled in the DASH manifest that MediaConvert creates as part of this CMAF package. The DASH manifest can currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/.
+ #[serde(rename = "DashSignaledSystemIds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dash_signaled_system_ids: Option>, + /// Specify the DRM system ID that you want signaled in the HLS manifest that MediaConvert creates as part of this CMAF package. The HLS manifest can currently signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/.
+ #[serde(rename = "HlsSignaledSystemIds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub hls_signaled_system_ids: Option>, + /// Specify the resource ID that your SPEKE-compliant key provider uses to identify this content.
+ #[serde(rename = "ResourceId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_id: Option, + /// Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.
#[serde(rename = "Url")] #[serde(skip_serializing_if = "Option::is_none")] pub url: Option, @@ -3652,7 +3854,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} /// Settings for Teletext caption output
@@ -3662,6 +3864,10 @@ pub struct TeletextDestinationSettings { #[serde(rename = "PageNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub page_number: Option, + /// Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle (PAGETYPESUBTITLE). If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input.
+ #[serde(rename = "PageTypes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub page_types: Option>, } /// Settings specific to Teletext caption sources, including Page number.
@@ -3722,7 +3928,7 @@ pub struct TimedMetadataInsertion { /// Information about when jobs are submitted, started, and finished is specified in Unix epoch format in seconds.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Timing { /// The time, in Unix epoch format, that the transcoding job finished
#[serde(rename = "FinishTime")] @@ -3738,7 +3944,7 @@ pub struct Timing { pub submit_time: Option, } -/// Settings specific to caption sources that are specfied by track number. Sources include IMSC in IMF.
+/// Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TrackSourceSettings { /// Use this setting to select a single captions track from a source. Track numbers correspond to the order in the captions source file. For IMF sources, track numbering is based on the order that the captions appear in the CPL. For example, use 1 to select the captions asset that is listed first in the CPL. To include more than one captions track in your job outputs, create multiple input captions selectors. Specify one track per selector.
@@ -3768,7 +3974,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -3788,6 +3994,10 @@ pub struct UpdateJobTemplateRequest { /// The name of the job template you are modifying
#[serde(rename = "Name")] pub name: String, + /// Specify the relative priority for this job. In any given queue, the service begins processing the job with the highest value first. When more than one job has the same priority, the service begins processing the job that you submitted first. If you don't specify a priority, the service uses the default value 0.
+ #[serde(rename = "Priority")] + #[serde(skip_serializing_if = "Option::is_none")] + pub priority: Option, /// The new queue for the job template, if you are changing it.
#[serde(rename = "Queue")] #[serde(skip_serializing_if = "Option::is_none")] @@ -3803,7 +4013,7 @@ pub struct UpdateJobTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateJobTemplateResponse { /// A job template is a pre-made set of encoding instructions that you can use to quickly create a job.
#[serde(rename = "JobTemplate")] @@ -3831,7 +4041,7 @@ pub struct UpdatePresetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePresetResponse { /// A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process.
#[serde(rename = "Preset")] @@ -3859,7 +4069,7 @@ pub struct UpdateQueueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateQueueResponse { /// You can use queues to manage the resources that are available to your AWS account for running multiple transcoding jobs at the same time. If you don't specify a queue, the service sends all jobs through the default queue. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
#[serde(rename = "Queue")] @@ -3867,7 +4077,7 @@ pub struct UpdateQueueResponse { pub queue: Option, } -/// Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H264, H264Settings * H265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings
+/// Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H264, H264Settings * H265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VideoCodecSettings { /// Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec.
@@ -3903,19 +4113,19 @@ pub struct VideoDescription { #[serde(rename = "AfdSignaling")] #[serde(skip_serializing_if = "Option::is_none")] pub afd_signaling: Option, - /// The service automatically applies the anti-alias filter to all outputs. The service no longer accepts the value DISABLED for AntiAlias. If you specify that in your job, the service will ignore the setting.
+ /// The anti-alias filter is automatically applied to all outputs. The service no longer accepts the value DISABLED for AntiAlias. If you specify that in your job, the service will ignore the setting.
#[serde(rename = "AntiAlias")] #[serde(skip_serializing_if = "Option::is_none")] pub anti_alias: Option, - /// Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H264, H264Settings * H265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings
+ /// Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H264, H264Settings * H265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings
#[serde(rename = "CodecSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub codec_settings: Option, - /// Enable Insert color metadata (ColorMetadata) to include color metadata in this output. This setting is enabled by default.
+ /// Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.
#[serde(rename = "ColorMetadata")] #[serde(skip_serializing_if = "Option::is_none")] pub color_metadata: Option, - /// Applies only if your input aspect ratio is different from your output aspect ratio. Use Input cropping rectangle (Crop) to specify the video area the service will include in the output. This will crop the input source, causing video pixels to be removed on encode. If you crop your input frame size to smaller than your output frame size, make sure to specify the behavior you want in your output setting "Scaling behavior".
+ /// Use Cropping selection (crop) to specify the video area that the service will include in the output video frame.
#[serde(rename = "Crop")] #[serde(skip_serializing_if = "Option::is_none")] pub crop: Option, @@ -3931,7 +4141,7 @@ pub struct VideoDescription { #[serde(rename = "Height")] #[serde(skip_serializing_if = "Option::is_none")] pub height: Option, - /// Use Position (Position) to point to a rectangle object to define your position. This setting overrides any other aspect ratio.
+ /// Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black.
#[serde(rename = "Position")] #[serde(skip_serializing_if = "Option::is_none")] pub position: Option, @@ -3939,7 +4149,7 @@ pub struct VideoDescription { #[serde(rename = "RespondToAfd")] #[serde(skip_serializing_if = "Option::is_none")] pub respond_to_afd: Option, - /// Applies only if your input aspect ratio is different from your output aspect ratio. Choose "Stretch to output" to have the service stretch your video image to fit. Keep the setting "Default" to allow the service to letterbox your video instead. This setting overrides any positioning value you specify elsewhere in the job.
+ /// Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output (STRETCHTOOUTPUT) to have the service stretch your video image to fit. Keep the setting Default (DEFAULT) to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement (position) in this output.
#[serde(rename = "ScalingBehavior")] #[serde(skip_serializing_if = "Option::is_none")] pub scaling_behavior: Option, @@ -3963,7 +4173,7 @@ pub struct VideoDescription { /// Contains details about the output's video stream
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VideoDetail { /// Height in pixels for the output
#[serde(rename = "HeightInPx")] @@ -4003,15 +4213,15 @@ pub struct VideoPreprocessor { /// Selector for video.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VideoSelector { - /// If your input video has accurate color space metadata, or if you don't know about color space, leave this set to the default value FOLLOW. The service will automatically detect your input color space. If your input video has metadata indicating the wrong color space, or if your input video is missing color space metadata that should be there, specify the accurate color space here. If you choose HDR10, you can also correct inaccurate color space coefficients, using the HDR master display information controls. You must also set Color space usage (ColorSpaceUsage) to FORCE for the service to use these values.
+ /// If your input video has accurate color space metadata, or if you don't know about color space, leave this set to the default value Follow (FOLLOW). The service will automatically detect your input color space. If your input video has metadata indicating the wrong color space, specify the accurate color space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering Display Color Volume static metadata isn't present in your video stream, or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10) here and specify correct values in the input HDR 10 metadata (Hdr10Metadata) settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
#[serde(rename = "ColorSpace")] #[serde(skip_serializing_if = "Option::is_none")] pub color_space: Option, - ///

There are two sources for color metadata, the input file and the job configuration (in the Color space and HDR master display informaiton settings). The Color space usage setting controls which takes precedence. FORCE: The system will use color metadata supplied by user, if any. If the user does not supply color metadata, the system will use data from the source. FALLBACK: The system will use color metadata from the source. If source has no color metadata, the system will use user-supplied color metadata values if available.

+ ///

There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.

#[serde(rename = "ColorSpaceUsage")] #[serde(skip_serializing_if = "Option::is_none")] pub color_space_usage: Option, - ///

Use the "HDR master display information" (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000; each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the following settings. Set "MP4 packaging type" (writeMp4PackagingType) to HVC1 (HVC1). Set "Profile" (H265Settings > codecProfile) to Main10/High (MAIN10HIGH). Set "Level" (H265Settings > codecLevel) to 5 (LEVEL5).

+ ///

Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify here take precedence over the values in the metadata of your input file, set Color space usage (ColorSpaceUsage). To specify whether color metadata is included in an output, set Color metadata (ColorMetadata). For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.

#[serde(rename = "Hdr10Metadata")] #[serde(skip_serializing_if = "Option::is_none")] pub hdr_10_metadata: Option, @@ -4036,7 +4246,7 @@ pub struct WavSettings { #[serde(rename = "BitDepth")] #[serde(skip_serializing_if = "Option::is_none")] pub bit_depth: Option, - ///

Set Channels to specify the number of channels in this output audio track. With WAV, valid values 1, 2, 4, and 8. In the console, these values are Mono, Stereo, 4-Channel, and 8-Channel, respectively.

+ ///

Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.

#[serde(rename = "Channels")] #[serde(skip_serializing_if = "Option::is_none")] pub channels: Option, @@ -5796,10 +6006,7 @@ impl MediaConvertClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MediaConvertClient { - MediaConvertClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5813,10 +6020,14 @@ impl MediaConvertClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MediaConvertClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MediaConvertClient { + MediaConvertClient { client, region } } } diff --git a/rusoto/services/medialive/Cargo.toml b/rusoto/services/medialive/Cargo.toml index fdbf2d97a8d..4dab31117ce 100644 --- a/rusoto/services/medialive/Cargo.toml +++ b/rusoto/services/medialive/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_medialive" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/medialive/README.md b/rusoto/services/medialive/README.md index 2c067998158..ebab6774884 100644 --- a/rusoto/services/medialive/README.md +++ b/rusoto/services/medialive/README.md @@ -23,9 +23,16 @@ To use `rusoto_medialive` in your application, add it as a dependency in your `C ```toml [dependencies] -rusoto_medialive = "0.40.0" +rusoto_medialive = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/medialive/src/custom/mod.rs b/rusoto/services/medialive/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/medialive/src/custom/mod.rs +++ b/rusoto/services/medialive/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/medialive/src/generated.rs b/rusoto/services/medialive/src/generated.rs index ef5c09873e1..9b390b6dc0b 100644 --- a/rusoto/services/medialive/src/generated.rs +++ b/rusoto/services/medialive/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -258,7 +257,9 @@ pub struct AudioOnlyHlsSettings { #[serde(rename = "AudioGroupId")] #[serde(skip_serializing_if = "Option::is_none")] pub audio_group_id: Option, - ///

For use with an audio only Stream. Must be a .jpg or .png file. If given, this image will be used as the cover-art for the audio only output. Ideally, it should be formatted for an iPhone screen for two reasons. The iPhone does not resize the image, it crops a centered image on the top/bottom and left/right. Additionally, this image file gets saved bit-for-bit into every 10-second segment file, so will increase bandwidth by {image file size} * {segment count} * {user count.}.

+ ///

Optional. Specifies the .jpg or .png image to use as the cover art for an audio-only output. We recommend a low bit-size file because the image increases the output audio bandwidth.

+ /// + ///

The image is attached to the audio as an ID3 tag, frame type APIC, picture type 0x10, as per the "ID3 tag version 2.4.0 - Native Frames" standard.

#[serde(rename = "AudioOnlyImage")] #[serde(skip_serializing_if = "Option::is_none")] pub audio_only_image: Option, @@ -354,7 +355,7 @@ pub struct BatchScheduleActionCreateRequest { ///

List of actions that have been created in the schedule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchScheduleActionCreateResult { ///

List of actions that have been created in the schedule.

#[serde(rename = "ScheduleActions")] @@ -371,7 +372,7 @@ pub struct BatchScheduleActionDeleteRequest { ///

List of actions that have been deleted from the schedule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchScheduleActionDeleteResult { ///

List of actions that have been deleted from the schedule.

#[serde(rename = "ScheduleActions")] @@ -396,7 +397,7 @@ pub struct BatchUpdateScheduleRequest { ///

Placeholder documentation for BatchUpdateScheduleResponse

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchUpdateScheduleResponse { ///

Schedule actions created in the schedule.

#[serde(rename = "Creates")] @@ -625,7 +626,7 @@ pub struct CaptionSelectorSettings { ///

Placeholder documentation for Channel

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Channel { ///

The unique arn of the channel.

#[serde(rename = "Arn")] @@ -667,6 +668,10 @@ pub struct Channel { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, + ///

Runtime details for the pipelines of a running channel.

+ #[serde(rename = "PipelineDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pipeline_details: Option>, ///

The number of currently healthy pipelines.

#[serde(rename = "PipelinesRunningCount")] #[serde(skip_serializing_if = "Option::is_none")] @@ -686,7 +691,7 @@ pub struct Channel { ///

Placeholder documentation for ChannelEgressEndpoint

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChannelEgressEndpoint { ///

Public IP of where a channel's output comes from

#[serde(rename = "SourceIp")] @@ -696,7 +701,7 @@ pub struct ChannelEgressEndpoint { ///

Placeholder documentation for ChannelSummary

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChannelSummary { ///

The unique arn of the channel.

#[serde(rename = "Arn")] @@ -752,6 +757,10 @@ pub struct ChannelSummary { pub tags: Option<::std::collections::HashMap>, } +///

Passthrough applies no color space conversion to the output

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ColorSpacePassthroughSettings {} + ///

A request to create a channel

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateChannelRequest { @@ -798,7 +807,7 @@ pub struct CreateChannelRequest { /// Placeholder documentation for CreateChannelResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateChannelResponse { #[serde(rename = "Channel")] #[serde(skip_serializing_if = "Option::is_none")] @@ -855,7 +864,7 @@ pub struct CreateInputRequest { /// Placeholder documentation for CreateInputResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInputResponse { #[serde(rename = "Input")] #[serde(skip_serializing_if = "Option::is_none")] @@ -877,7 +886,7 @@ pub struct CreateInputSecurityGroupRequest { /// Placeholder documentation for CreateInputSecurityGroupResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInputSecurityGroupResponse { #[serde(rename = "SecurityGroup")] #[serde(skip_serializing_if = "Option::is_none")] @@ -904,7 +913,7 @@ pub struct DeleteChannelRequest { /// Placeholder documentation for DeleteChannelResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteChannelResponse { /// The unique arn of the channel.
#[serde(rename = "Arn")] @@ -946,6 +955,10 @@ pub struct DeleteChannelResponse { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, + /// Runtime details for the pipelines of a running channel.
+ #[serde(rename = "PipelineDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pipeline_details: Option>, /// The number of currently healthy pipelines.
#[serde(rename = "PipelinesRunningCount")] #[serde(skip_serializing_if = "Option::is_none")] @@ -973,7 +986,7 @@ pub struct DeleteInputRequest { /// Placeholder documentation for DeleteInputResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInputResponse {} /// Placeholder documentation for DeleteInputSecurityGroupRequest
@@ -986,7 +999,7 @@ pub struct DeleteInputSecurityGroupRequest { /// Placeholder documentation for DeleteInputSecurityGroupResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInputSecurityGroupResponse {} /// Placeholder documentation for DeleteReservationRequest
@@ -999,7 +1012,7 @@ pub struct DeleteReservationRequest { /// Placeholder documentation for DeleteReservationResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReservationResponse { /// Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'
#[serde(rename = "Arn")] @@ -1085,7 +1098,7 @@ pub struct DeleteScheduleRequest { /// Placeholder documentation for DeleteScheduleResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteScheduleResponse {} /// Placeholder documentation for DeleteTagsRequest
@@ -1108,7 +1121,7 @@ pub struct DescribeChannelRequest { /// Placeholder documentation for DescribeChannelResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeChannelResponse { /// The unique arn of the channel.
#[serde(rename = "Arn")] @@ -1150,6 +1163,10 @@ pub struct DescribeChannelResponse { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, + /// Runtime details for the pipelines of a running channel.
+ #[serde(rename = "PipelineDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pipeline_details: Option>, /// The number of currently healthy pipelines.
#[serde(rename = "PipelinesRunningCount")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1177,7 +1194,7 @@ pub struct DescribeInputRequest { /// Placeholder documentation for DescribeInputResponse
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInputResponse { /// The Unique ARN of the input (generated, immutable).
#[serde(rename = "Arn")] @@ -1200,6 +1217,11 @@ pub struct DescribeInputResponse { #[serde(rename = "InputClass")] #[serde(skip_serializing_if = "Option::is_none")] pub input_class: Option, + /// Certain pull input sources can be dynamic, meaning that they can have their URLs dynamically changed + /// during input switch actions. Presently, this functionality only works with MP4_FILE inputs.
+ #[serde(rename = "InputSourceType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub input_source_type: Option, ///

A list of MediaConnect Flows for this input.

#[serde(rename = "MediaConnectFlows")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1242,7 +1264,7 @@ pub struct DescribeInputSecurityGroupRequest { ///

Placeholder documentation for DescribeInputSecurityGroupResponse

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInputSecurityGroupResponse { ///

Unique ARN of Input Security Group

#[serde(rename = "Arn")] @@ -1280,7 +1302,7 @@ pub struct DescribeOfferingRequest { ///

Placeholder documentation for DescribeOfferingResponse

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOfferingResponse { ///

Unique offering ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:offering:87654321'

#[serde(rename = "Arn")] @@ -1338,7 +1360,7 @@ pub struct DescribeReservationRequest { ///

Placeholder documentation for DescribeReservationResponse

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeReservationResponse { ///

Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'

#[serde(rename = "Arn")] @@ -1430,7 +1452,7 @@ pub struct DescribeScheduleRequest { ///

Placeholder documentation for DescribeScheduleResponse

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeScheduleResponse { ///

The next token; for use in pagination.

#[serde(rename = "NextToken")] @@ -1810,6 +1832,20 @@ pub struct GlobalConfiguration { pub support_low_framerate_inputs: Option, } +///

+/// <p>H264 Color Space Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct H264ColorSpaceSettings {
+    #[serde(rename = "ColorSpacePassthroughSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub color_space_passthrough_settings: Option<ColorSpacePassthroughSettings>,
+    #[serde(rename = "Rec601Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rec_601_settings: Option<Rec601Settings>,
+    #[serde(rename = "Rec709Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rec_709_settings: Option<Rec709Settings>,
+}
+
 /// <p>H264 Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct H264Settings {
@@ -1829,7 +1865,7 @@ pub struct H264Settings {
     #[serde(rename = "BufFillPct")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub buf_fill_pct: Option<i64>,
-    /// <p>Size of buffer (HRD buffer model) in bits/second.</p>
+    /// <p>Size of buffer (HRD buffer model) in bits.</p>
     #[serde(rename = "BufSize")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub buf_size: Option<i64>,
@@ -1837,6 +1873,10 @@ pub struct H264Settings {
     #[serde(rename = "ColorMetadata")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub color_metadata: Option<String>,
+    /// <p>Color Space settings</p>
+    #[serde(rename = "ColorSpaceSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub color_space_settings: Option<H264ColorSpaceSettings>,
     /// <p>Entropy encoding mode.  Use cabac (must be in Main or High profile) or cavlc.</p>
     #[serde(rename = "EntropyEncoding")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1889,7 +1929,7 @@ pub struct H264Settings {
     #[serde(rename = "LookAheadRateControl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub look_ahead_rate_control: Option<String>,
-    /// <p>For QVBR: See the tooltip for Quality level</p>
+    /// <p>For QVBR: See the tooltip for Quality level</p>
+    ///
+    /// <p>For VBR: Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video.</p>
     #[serde(rename = "MaxBitrate")]
@@ -1926,7 +1966,7 @@ pub struct H264Settings {
     #[serde(rename = "QvbrQualityLevel")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub qvbr_quality_level: Option<i64>,
-    /// <p>Rate control mode.</p>
+    /// <p>Rate control mode.</p>
+    ///
+    /// <p>QVBR: Quality will match the specified quality level except when it is constrained by the
+    /// maximum bitrate. Recommended if you or your viewers pay for bandwidth.</p>
@@ -1985,6 +2025,167 @@ pub struct H264Settings {
     pub timecode_insertion: Option<String>,
 }
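The reworded QVBR documentation above pairs a quality level with a maximum-bitrate cap. A hedged sketch of such a configuration; the string `"QVBR"` and the numeric choices are illustrative assumptions drawn from the field docs, not values asserted by this diff:

```rust
use rusoto_medialive::H264Settings;

fn qvbr_h264() -> H264Settings {
    H264Settings {
        // Quality tracks the chosen level unless MaxBitrate constrains it.
        rate_control_mode: Some("QVBR".to_string()), // assumed enum string
        qvbr_quality_level: Some(8),                 // primary-screen range per the docs
        max_bitrate: Some(4_000_000),                // headroom for complexity spikes
        ..Default::default()
    }
}
```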

+/// <p>H265 Color Space Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct H265ColorSpaceSettings {
+    #[serde(rename = "ColorSpacePassthroughSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub color_space_passthrough_settings: Option<ColorSpacePassthroughSettings>,
+    #[serde(rename = "Hdr10Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub hdr_10_settings: Option<Hdr10Settings>,
+    #[serde(rename = "Rec601Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rec_601_settings: Option<Rec601Settings>,
+    #[serde(rename = "Rec709Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rec_709_settings: Option<Rec709Settings>,
+}
+
+/// <p>H265 Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct H265Settings {
+    /// <p>Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality.</p>
+    #[serde(rename = "AdaptiveQuantization")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub adaptive_quantization: Option<String>,
+    /// <p>Indicates that AFD values will be written into the output stream. If afdSignaling is "auto", the system will try to preserve the input AFD value (in cases where multiple AFD values are valid). If set to "fixed", the AFD value will be the value configured in the fixedAfd parameter.</p>
+    #[serde(rename = "AfdSignaling")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub afd_signaling: Option<String>,
+    /// <p>Whether or not EML should insert an Alternative Transfer Function SEI message to support backwards compatibility with non-HDR decoders and displays.</p>
+    #[serde(rename = "AlternativeTransferFunction")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub alternative_transfer_function: Option<String>,
+    /// <p>Average bitrate in bits/second. Required when the rate control mode is VBR or CBR. Not used for QVBR. In an MS Smooth output group, each output must have a unique value when its bitrate is rounded down to the nearest multiple of 1000.</p>
+    #[serde(rename = "Bitrate")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bitrate: Option<i64>,
+    /// <p>Size of buffer (HRD buffer model) in bits.</p>
+    #[serde(rename = "BufSize")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub buf_size: Option<i64>,
+    /// <p>Includes colorspace metadata in the output.</p>
+    #[serde(rename = "ColorMetadata")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub color_metadata: Option<String>,
+    /// <p>Color Space settings</p>
+    #[serde(rename = "ColorSpaceSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub color_space_settings: Option<H265ColorSpaceSettings>,
+    /// <p>Four bit AFD value to write on all frames of video in the output stream. Only valid when afdSignaling is set to 'Fixed'.</p>
+    #[serde(rename = "FixedAfd")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fixed_afd: Option<String>,
+    /// <p>If set to enabled, adjust quantization within each frame to reduce flicker or 'pop' on I-frames.</p>
+    #[serde(rename = "FlickerAq")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub flicker_aq: Option<String>,
+    /// <p>Framerate denominator.</p>
+    #[serde(rename = "FramerateDenominator")]
+    pub framerate_denominator: i64,
+    /// <p>Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps.</p>
+    #[serde(rename = "FramerateNumerator")]
+    pub framerate_numerator: i64,
+    /// <p>Frequency of closed GOPs. In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting.</p>
+    #[serde(rename = "GopClosedCadence")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub gop_closed_cadence: Option<i64>,
+    /// <p>GOP size (keyframe interval) in units of either frames or seconds per gopSizeUnits. Must be greater than zero.</p>
+    #[serde(rename = "GopSize")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub gop_size: Option<f64>,
+    /// <p>Indicates if the gopSize is specified in frames or seconds. If seconds the system will convert the gopSize into a frame count at run time.</p>
+    #[serde(rename = "GopSizeUnits")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub gop_size_units: Option<String>,
+    /// <p>H.265 Level.</p>
+    #[serde(rename = "Level")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub level: Option<String>,
+    /// <p>Amount of lookahead. A value of low can decrease latency and memory usage, while high can produce better quality for certain content.</p>
+    #[serde(rename = "LookAheadRateControl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub look_ahead_rate_control: Option<String>,
+    /// <p>For QVBR: See the tooltip for Quality level</p>
+    #[serde(rename = "MaxBitrate")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_bitrate: Option<i64>,
+    /// <p>Only meaningful if sceneChangeDetect is set to enabled. Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1</p>
+    #[serde(rename = "MinIInterval")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub min_i_interval: Option<i64>,
+    /// <p>Pixel Aspect Ratio denominator.</p>
+    #[serde(rename = "ParDenominator")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub par_denominator: Option<i64>,
+    /// <p>Pixel Aspect Ratio numerator.</p>
+    #[serde(rename = "ParNumerator")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub par_numerator: Option<i64>,
+    /// <p>H.265 Profile.</p>
+    #[serde(rename = "Profile")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub profile: Option<String>,
+    /// <p>Controls the target quality for the video encode. Applies only when the rate control mode is QVBR. Set values for the QVBR quality level field and Max bitrate field that suit your most important viewing devices. Recommended values are:
+    /// - Primary screen: Quality level: 8 to 10. Max bitrate: 4M
+    /// - PC or tablet: Quality level: 7. Max bitrate: 1.5M to 3M
+    /// - Smartphone: Quality level: 6. Max bitrate: 1M to 1.5M</p>
+    #[serde(rename = "QvbrQualityLevel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub qvbr_quality_level: Option<i64>,
+    /// <p>Rate control mode.</p>
+    ///
+    /// <p>QVBR: Quality will match the specified quality level except when it is constrained by the
+    /// maximum bitrate. Recommended if you or your viewers pay for bandwidth.</p>
+    ///
+    /// <p>CBR: Quality varies, depending on the video complexity. Recommended only if you distribute
+    /// your assets to devices that cannot handle variable bitrates.</p>
+    #[serde(rename = "RateControlMode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rate_control_mode: Option<String>,
+    /// <p>Sets the scan type of the output to progressive or top-field-first interlaced.</p>
+    #[serde(rename = "ScanType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub scan_type: Option<String>,
+    /// <p>Scene change detection.</p>
+    #[serde(rename = "SceneChangeDetect")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub scene_change_detect: Option<String>,
+    /// <p>Number of slices per picture. Must be less than or equal to the number of macroblock rows for progressive pictures, and less than or equal to half the number of macroblock rows for interlaced pictures.
+    /// This field is optional; when no value is specified the encoder will choose the number of slices based on encode resolution.</p>
+    #[serde(rename = "Slices")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub slices: Option<i64>,
+    /// <p>H.265 Tier.</p>
+    #[serde(rename = "Tier")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tier: Option<String>,
+    /// <p>Determines how timecodes should be inserted into the video elementary stream.
+    /// - 'disabled': Do not include timecodes
+    /// - 'picTimingSei': Pass through picture timing SEI messages from the source specified in Timecode Config</p>
+    #[serde(rename = "TimecodeInsertion")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub timecode_insertion: Option<String>,
+}
+
+/// <p>Hdr10 Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Hdr10Settings {
+    /// <p>Maximum Content Light Level
+    /// An integer metadata value defining the maximum light level, in nits,
+    /// of any single pixel within an encoded HDR video stream or file.</p>
+    #[serde(rename = "MaxCll")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_cll: Option<i64>,
+    /// <p>Maximum Frame Average Light Level
+    /// An integer metadata value defining the maximum average light level, in nits,
+    /// for any single frame within an encoded HDR video stream or file.</p>
+    #[serde(rename = "MaxFall")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_fall: Option<i64>,
+}

 /// <p>Hls Akamai Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct HlsAkamaiSettings {
@@ -2329,9 +2530,13 @@ pub struct HlsWebdavSettings {
     pub restart_delay: Option<i64>,
 }

+/// <p>Settings to configure an action so that it occurs immediately. This is only supported for input switch actions currently.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ImmediateModeScheduleActionStartSettings {}
+
 /// <p>Placeholder documentation for Input</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Input {
     /// <p>The Unique ARN of the input (generated, immutable).</p>
     #[serde(rename = "Arn")]
@@ -2354,6 +2559,11 @@ pub struct Input {
     #[serde(rename = "InputClass")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub input_class: Option<String>,
+    /// <p>Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes
+    /// during input switch actions. Presently, this functionality only works with MP4_FILE inputs.</p>
+    #[serde(rename = "InputSourceType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_source_type: Option<String>,
     /// <p>A list of MediaConnect Flows for this input.</p>
     #[serde(rename = "MediaConnectFlows")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2414,9 +2624,25 @@ pub struct InputChannelLevel {
     pub input_channel: i64,
 }

+/// <p>Settings to let you create a clip of the file input, in order to set up the input to ingest only a portion of the file.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct InputClippingSettings {
+    /// <p>The source of the timecodes in the source being clipped.</p>
+    #[serde(rename = "InputTimecodeSource")]
+    pub input_timecode_source: String,
+    /// <p>Settings to identify the start of the clip.</p>
+    #[serde(rename = "StartTimecode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start_timecode: Option<StartTimecode>,
+    /// <p>Settings to identify the end of the clip.</p>
+    #[serde(rename = "StopTimecode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stop_timecode: Option<StopTimecode>,
+}

 /// <p>The settings for a PUSH type input.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InputDestination {
     /// <p>The system-generated static IP address of endpoint.
     /// It remains fixed for the lifetime of the input.</p>
@@ -2449,7 +2675,7 @@ pub struct InputDestinationRequest {
 /// <p>The properties for a VPC type input destination.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InputDestinationVpc {
     /// <p>The availability zone of the Input destination.</p>
     #[serde(rename = "AvailabilityZone")]
@@ -2504,7 +2730,7 @@ pub struct InputLossBehavior {
 /// <p>An Input Security Group</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InputSecurityGroup {
     /// <p>Unique ARN of Input Security Group</p>
     #[serde(rename = "Arn")]
@@ -2578,7 +2804,7 @@ pub struct InputSettings {
 /// <p>The settings for a PULL type input.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InputSource {
     /// <p>The key used to extract the password from EC2 Parameter store.</p>
     #[serde(rename = "PasswordParam")]
@@ -2630,12 +2856,20 @@ pub struct InputSpecification {
     pub resolution: Option<String>,
 }

-/// <p>Settings for the action to switch an input.</p>
+/// <p>Settings for the "switch input" action: to switch from ingesting one input to ingesting another input.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct InputSwitchScheduleActionSettings {
-    /// <p>The name of the input attachment that should be switched to by this action.</p>
+    /// <p>The name of the input attachment (not the name of the input!) to switch to. The name is specified in the channel configuration.</p>
     #[serde(rename = "InputAttachmentNameReference")]
     pub input_attachment_name_reference: String,
+    /// <p>Settings to let you create a clip of the file input, in order to set up the input to ingest only a portion of the file.</p>
+    #[serde(rename = "InputClippingSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_clipping_settings: Option<InputClippingSettings>,
+    /// <p>The value for the variable portion of the URL for the dynamic input, for this instance of the input. Each time you use the same dynamic input in an input switch action, you can provide a different value, in order to connect the input to a different content source.</p>
+    #[serde(rename = "UrlPath")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub url_path: Option<Vec<String>>,
 }

 /// <p>Settings for a private VPC Input.
@@ -2657,7 +2891,7 @@ pub struct InputVpcRequest {
 /// <p>Whitelist rule</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct InputWhitelistRule {
     /// <p>The IPv4 CIDR that's whitelisted.</p>
     #[serde(rename = "Cidr")]
@@ -2695,7 +2929,7 @@ pub struct ListChannelsRequest {
 /// <p>Placeholder documentation for ListChannelsResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListChannelsResponse {
     #[serde(rename = "Channels")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2718,7 +2952,7 @@ pub struct ListInputSecurityGroupsRequest {
 /// <p>Placeholder documentation for ListInputSecurityGroupsResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListInputSecurityGroupsResponse {
     /// <p>List of input security groups</p>
     #[serde(rename = "InputSecurityGroups")]
@@ -2742,7 +2976,7 @@ pub struct ListInputsRequest {
 /// <p>Placeholder documentation for ListInputsResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListInputsResponse {
     #[serde(rename = "Inputs")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2801,7 +3035,7 @@ pub struct ListOfferingsRequest {
 /// <p>Placeholder documentation for ListOfferingsResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListOfferingsResponse {
     /// <p>Token to retrieve the next page of results</p>
     #[serde(rename = "NextToken")]
@@ -2858,7 +3092,7 @@ pub struct ListReservationsRequest {
 /// <p>Placeholder documentation for ListReservationsResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListReservationsResponse {
     /// <p>Token to retrieve the next page of results</p>
     #[serde(rename = "NextToken")]
@@ -2879,7 +3113,7 @@ pub struct ListTagsForResourceRequest {
 /// <p>Placeholder documentation for ListTagsForResourceResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForResourceResponse {
     #[serde(rename = "Tags")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -3150,7 +3384,7 @@ pub struct M3u8Settings {
 /// <p>The settings for a MediaConnect Flow.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MediaConnectFlow {
     /// <p>The unique ARN of the MediaConnect Flow being used as a source.</p>
     #[serde(rename = "FlowArn")]
@@ -3175,7 +3409,7 @@ pub struct MediaPackageGroupSettings {
     pub destination: OutputLocationRef,
 }

-/// <p>Media Package Output Destination Settings</p>
+/// <p>MediaPackage Output Destination Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct MediaPackageOutputDestinationSettings {
     /// <p>ID of the channel in MediaPackage that is the destination for this output group. You do not need to specify the individual inputs in MediaPackage; MediaLive will handle the connection of the two MediaLive pipelines to the two MediaPackage inputs. The MediaPackage channel and MediaLive channel must be in the same region.</p>
@@ -3297,6 +3531,11 @@ pub struct MsSmoothGroupSettings {
 /// <p>Ms Smooth Output Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct MsSmoothOutputSettings {
+    /// <p>Only applicable when this output is referencing an H.265 video description.
+    /// Specifies whether MP4 segments should be packaged as HEV1 or HVC1.</p>
+    #[serde(rename = "H265PackagingType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub h265_packaging_type: Option<String>,
     /// <p>String concatenated to the end of the destination filename.  Required for multiple outputs of the same type.</p>
     #[serde(rename = "NameModifier")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -3318,7 +3557,7 @@ pub struct NetworkInputSettings {
 /// <p>Reserved resources available for purchase</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Offering {
     /// <p>Unique offering ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:offering:87654321'</p>
     #[serde(rename = "Arn")]
@@ -3514,6 +3753,24 @@ pub struct PauseStateScheduleActionSettings {
     pub pipelines: Option<Vec<PipelinePauseStateSettings>>,
 }

+/// <p>Runtime details of a pipeline when a channel is running.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct PipelineDetail {
+    /// <p>The name of the active input attachment currently being ingested by this pipeline.</p>
+    #[serde(rename = "ActiveInputAttachmentName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub active_input_attachment_name: Option<String>,
+    /// <p>The name of the input switch schedule action that occurred most recently and that resulted in the switch to the current input attachment for this pipeline.</p>
+    #[serde(rename = "ActiveInputSwitchActionName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub active_input_switch_action_name: Option<String>,
+    /// <p>Pipeline ID</p>
+    #[serde(rename = "PipelineId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pipeline_id: Option<String>,
+}

 /// <p>Settings for pausing a pipeline.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct PipelinePauseStateSettings {
@@ -3551,13 +3808,21 @@ pub struct PurchaseOfferingRequest {
 /// <p>Placeholder documentation for PurchaseOfferingResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PurchaseOfferingResponse {
     #[serde(rename = "Reservation")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub reservation: Option<Reservation>,
 }

+/// <p>Rec601 Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Rec601Settings {}
+
+/// <p>Rec709 Settings</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Rec709Settings {}
+
 /// <p>Remix Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct RemixSettings {
@@ -3577,7 +3842,7 @@ pub struct RemixSettings {
 /// <p>Reserved resources available to use</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Reservation {
     /// <p>Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'</p>
     #[serde(rename = "Arn")]
@@ -3655,7 +3920,7 @@ pub struct Reservation {
 /// <p>Resource configuration (codec, resolution, bitrate, ...)</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ReservationResourceSpecification {
     /// <p>Channel class, e.g. 'STANDARD'</p>
     #[serde(rename = "ChannelClass")]
@@ -3800,17 +4065,22 @@ pub struct ScheduleActionSettings {
     pub static_image_deactivate_settings: Option<StaticImageDeactivateScheduleActionSettings>,
 }

-/// <p>Settings to specify the start time for an action.</p>
+/// <p>Settings to specify when an action should occur. Only one of the options must be selected.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ScheduleActionStartSettings {
-    /// <p>Holds the start time for the action.</p>
+    /// <p>Option for specifying the start time for an action.</p>
     #[serde(rename = "FixedModeScheduleActionStartSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub fixed_mode_schedule_action_start_settings: Option<FixedModeScheduleActionStartSettings>,
-    /// <p>Specifies an action to follow for scheduling this action.</p>
+    /// <p>Option for specifying an action as relative to another action.</p>
     #[serde(rename = "FollowModeScheduleActionStartSettings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub follow_mode_schedule_action_start_settings: Option<FollowModeScheduleActionStartSettings>,
+    /// <p>Option for specifying an action that should be applied immediately.</p>
+    #[serde(rename = "ImmediateModeScheduleActionStartSettings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub immediate_mode_schedule_action_start_settings:
+        Option<ImmediateModeScheduleActionStartSettings>,
 }

 /// <p>Scte20 Plus Embedded Destination Settings</p>
@@ -4014,7 +4284,7 @@ pub struct StartChannelRequest {
 /// <p>Placeholder documentation for StartChannelResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartChannelResponse {
     /// <p>The unique arn of the channel.</p>
     #[serde(rename = "Arn")]
@@ -4056,6 +4326,10 @@ pub struct StartChannelResponse {
     #[serde(rename = "Name")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
+    /// <p>Runtime details for the pipelines of a running channel.</p>
+    #[serde(rename = "PipelineDetails")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pipeline_details: Option<Vec<PipelineDetail>>,
     /// <p>The number of currently healthy pipelines.</p>
     #[serde(rename = "PipelinesRunningCount")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4073,6 +4347,15 @@ pub struct StartChannelResponse {
     pub tags: Option<::std::collections::HashMap<String, String>>,
 }

+/// <p>Settings to identify the start of the clip.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct StartTimecode {
+    /// <p>The timecode for the frame where you want to start the clip. Optional; if not specified, the clip starts at first frame in the file. Enter the timecode as HH:MM:SS:FF or HH:MM:SS;FF.</p>
+    #[serde(rename = "Timecode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub timecode: Option<String>,
+}
+
 /// <p>Settings for the action to activate a static image.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct StaticImageActivateScheduleActionSettings {
@@ -4152,7 +4435,7 @@ pub struct StopChannelRequest {
 /// <p>Placeholder documentation for StopChannelResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StopChannelResponse {
     /// <p>The unique arn of the channel.</p>
     #[serde(rename = "Arn")]
@@ -4194,6 +4477,10 @@ pub struct StopChannelResponse {
     #[serde(rename = "Name")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
+    /// <p>Runtime details for the pipelines of a running channel.</p>
+    #[serde(rename = "PipelineDetails")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pipeline_details: Option<Vec<PipelineDetail>>,
     /// <p>The number of currently healthy pipelines.</p>
     #[serde(rename = "PipelinesRunningCount")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4211,6 +4498,19 @@ pub struct StopChannelResponse {
     pub tags: Option<::std::collections::HashMap<String, String>>,
 }

+/// <p>Settings to identify the end of the clip.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct StopTimecode {
+    /// <p>If you specify a StopTimecode in an input (in order to clip the file), you can specify if you want the clip to exclude (the default) or include the frame specified by the timecode.</p>
+    #[serde(rename = "LastFrameClippingBehavior")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_frame_clipping_behavior: Option<String>,
+    /// <p>The timecode for the frame where you want to stop the clip. Optional; if not specified, the clip continues to the end of the file. Enter the timecode as HH:MM:SS:FF or HH:MM:SS;FF.</p>
+    #[serde(rename = "Timecode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub timecode: Option<String>,
+}
+
 /// <p>Teletext Destination Settings</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct TeletextDestinationSettings {}
@@ -4308,7 +4608,7 @@ pub struct UpdateChannelClassRequest {
 /// <p>Placeholder documentation for UpdateChannelClassResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateChannelClassResponse {
     #[serde(rename = "Channel")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4352,7 +4652,7 @@ pub struct UpdateChannelRequest {
 /// <p>Placeholder documentation for UpdateChannelResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateChannelResponse {
     #[serde(rename = "Channel")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4397,7 +4697,7 @@ pub struct UpdateInputRequest {
 /// <p>Placeholder documentation for UpdateInputResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateInputResponse {
     #[serde(rename = "Input")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4422,7 +4722,7 @@ pub struct UpdateInputSecurityGroupRequest {
 /// <p>Placeholder documentation for UpdateInputSecurityGroupResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateInputSecurityGroupResponse {
     #[serde(rename = "SecurityGroup")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4443,7 +4743,7 @@ pub struct UpdateReservationRequest {
 /// <p>Placeholder documentation for UpdateReservationResponse</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateReservationResponse {
     #[serde(rename = "Reservation")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -4466,6 +4766,9 @@ pub struct VideoCodecSettings {
     #[serde(rename = "H264Settings")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub h264_settings: Option<H264Settings>,
+    #[serde(rename = "H265Settings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub h265_settings: Option<H265Settings>,
 }

 /// <p>Video settings for this stream.</p>
@@ -4503,7 +4806,7 @@ pub struct VideoDescription {
 /// <p>Specifies a particular video stream within an input source. An input may have only a single video selector.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct VideoSelector {
-    /// <p>Specifies the colorspace of an input. This setting works in tandem with colorSpaceConversion to determine if any conversion will be performed.</p>
+    /// <p>Specifies the color space of an input. This setting works in tandem with colorSpaceUsage and a video description's colorSpaceSettingsChoice to determine if any conversion will be performed.</p>
     #[serde(rename = "ColorSpace")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub color_space: Option<String>,
@@ -6913,10 +7216,7 @@ impl MediaLiveClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> MediaLiveClient {
-        MediaLiveClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -6930,10 +7230,14 @@ impl MediaLiveClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        MediaLiveClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> MediaLiveClient {
+        MediaLiveClient { client, region }
     }
 }
diff --git a/rusoto/services/mediapackage/Cargo.toml b/rusoto/services/mediapackage/Cargo.toml
index 33a130ce153..afdbdc675f9 100644
--- a/rusoto/services/mediapackage/Cargo.toml
+++ b/rusoto/services/mediapackage/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_mediapackage"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/mediapackage/README.md b/rusoto/services/mediapackage/README.md
index ef9b7595660..26b0c97548d 100644
--- a/rusoto/services/mediapackage/README.md
+++ b/rusoto/services/mediapackage/README.md
@@ -23,9 +23,16 @@ To use `rusoto_mediapackage` in your application, add it as a dependency in your

 ```toml
 [dependencies]
-rusoto_mediapackage = "0.40.0"
+rusoto_mediapackage = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/mediapackage/src/custom/mod.rs b/rusoto/services/mediapackage/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/mediapackage/src/custom/mod.rs
+++ b/rusoto/services/mediapackage/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/mediapackage/src/generated.rs b/rusoto/services/mediapackage/src/generated.rs
index 6be5258be45..6439df3dc0c 100644
--- a/rusoto/services/mediapackage/src/generated.rs
+++ b/rusoto/services/mediapackage/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest;
 use serde_json;
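The constructor refactor repeated in each service client in this diff (MediaLive above; MediaPackage and MediaStore below) routes `new` and `new_with` through the new public `new_with_client`, which also lets several service clients share one underlying `Client`. A sketch of that use; sharing a client across two crates like this is an assumption about intent, not something the diff itself demonstrates:

```rust
use rusoto_core::{Client, Region};
use rusoto_medialive::MediaLiveClient;
use rusoto_mediapackage::MediaPackageClient;

fn shared_clients() -> (MediaLiveClient, MediaPackageClient) {
    // One Client (connection pool + default credentials) behind both service clients.
    let shared = Client::shared();
    (
        MediaLiveClient::new_with_client(shared.clone(), Region::UsWest2),
        MediaPackageClient::new_with_client(shared, Region::UsWest2),
    )
}
```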

 /// <p>A Channel resource configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Channel {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -62,7 +61,7 @@ pub struct CmafEncryption {
 /// <p>A Common Media Application Format (CMAF) packaging configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CmafPackage {
     #[serde(rename = "Encryption")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -126,7 +125,7 @@ pub struct CreateChannelRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateChannelResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -199,7 +198,7 @@ pub struct CreateOriginEndpointRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateOriginEndpointResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the OriginEndpoint.</p>
     #[serde(rename = "Arn")]
@@ -270,6 +269,12 @@ pub struct DashEncryption {
 /// <p>A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct DashPackage {
+    #[serde(rename = "AdTriggers")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ad_triggers: Option<Vec<String>>,
+    #[serde(rename = "AdsOnDeliveryRestrictions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ads_on_delivery_restrictions: Option<String>,
     #[serde(rename = "Encryption")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub encryption: Option<DashEncryption>,
@@ -326,7 +331,7 @@ pub struct DeleteChannelRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteChannelResponse {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -337,7 +342,7 @@ pub struct DeleteOriginEndpointRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteOriginEndpointResponse {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -348,7 +353,7 @@ pub struct DescribeChannelRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeChannelResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -378,7 +383,7 @@ pub struct DescribeOriginEndpointRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeOriginEndpointResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the OriginEndpoint.</p>
     #[serde(rename = "Arn")]
@@ -461,7 +466,7 @@ pub struct HlsEncryption {
 /// <p>An HTTP Live Streaming (HLS) ingest resource configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct HlsIngest {
     /// <p>A list of endpoints to which the source stream should be sent.</p>
     #[serde(rename = "IngestEndpoints")]
@@ -471,7 +476,7 @@ pub struct HlsIngest {
 /// <p>A HTTP Live Streaming (HLS) manifest configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct HlsManifest {
     /// <p>This setting controls how ad markers are included in the packaged OriginEndpoint.
     /// "NONE" will omit all SCTE-35 ad markers from the output.
@@ -533,6 +538,12 @@ pub struct HlsManifestCreateOrUpdateParameters {
     #[serde(rename = "AdMarkers")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ad_markers: Option<String>,
+    #[serde(rename = "AdTriggers")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ad_triggers: Option<Vec<String>>,
+    #[serde(rename = "AdsOnDeliveryRestrictions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ads_on_delivery_restrictions: Option<String>,
     /// <p>The ID of the manifest. The ID must be unique within the OriginEndpoint and it cannot be changed after it is created.</p>
     #[serde(rename = "Id")]
     pub id: String,
@@ -580,6 +591,12 @@ pub struct HlsPackage {
     #[serde(rename = "AdMarkers")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ad_markers: Option<String>,
+    #[serde(rename = "AdTriggers")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ad_triggers: Option<Vec<String>>,
+    #[serde(rename = "AdsOnDeliveryRestrictions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ads_on_delivery_restrictions: Option<String>,
     #[serde(rename = "Encryption")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub encryption: Option<HlsEncryption>,
@@ -625,7 +642,7 @@ pub struct HlsPackage {
 /// <p>An endpoint for ingesting source content for a Channel.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct IngestEndpoint {
     /// <p>The system generated unique identifier for the IngestEndpoint</p>
     #[serde(rename = "Id")]
@@ -658,7 +675,7 @@ pub struct ListChannelsRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListChannelsResponse {
     /// <p>A list of Channel records.</p>
     #[serde(rename = "Channels")]
@@ -687,7 +704,7 @@ pub struct ListOriginEndpointsRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListOriginEndpointsResponse {
     /// <p>A token that can be used to resume pagination from the end of the collection.</p>
     #[serde(rename = "NextToken")]
@@ -706,7 +723,7 @@ pub struct ListTagsForResourceRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForResourceResponse {
     #[serde(rename = "Tags")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -741,7 +758,7 @@ pub struct MssPackage {
 /// <p>An OriginEndpoint resource configuration.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct OriginEndpoint {
     /// <p>The Amazon Resource Name (ARN) assigned to the OriginEndpoint.</p>
     #[serde(rename = "Arn")]
@@ -806,7 +823,7 @@ pub struct RotateChannelCredentialsRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RotateChannelCredentialsResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -839,7 +856,7 @@ pub struct RotateIngestEndpointCredentialsRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RotateIngestEndpointCredentialsResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -932,7 +949,7 @@ pub struct UpdateChannelRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateChannelResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the Channel.</p>
     #[serde(rename = "Arn")]
@@ -997,7 +1014,7 @@ pub struct UpdateOriginEndpointRequest {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateOriginEndpointResponse {
     /// <p>The Amazon Resource Name (ARN) assigned to the OriginEndpoint.</p>
#[serde(rename = "Arn")] @@ -2040,10 +2057,7 @@ impl MediaPackageClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MediaPackageClient { - MediaPackageClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2057,10 +2071,14 @@ impl MediaPackageClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MediaPackageClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MediaPackageClient { + MediaPackageClient { client, region } } } diff --git a/rusoto/services/mediastore/Cargo.toml b/rusoto/services/mediastore/Cargo.toml index f09cd49f7d2..bdaff3ab88b 100644 --- a/rusoto/services/mediastore/Cargo.toml +++ b/rusoto/services/mediastore/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mediastore" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mediastore/README.md b/rusoto/services/mediastore/README.md index c89e176fa87..40e417854e2 100644 --- a/rusoto/services/mediastore/README.md +++ b/rusoto/services/mediastore/README.md @@ -23,9 +23,16 @@ To use `rusoto_mediastore` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_mediastore = "0.40.0" +rusoto_mediastore = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mediastore/src/custom/mod.rs b/rusoto/services/mediastore/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mediastore/src/custom/mod.rs +++ b/rusoto/services/mediastore/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mediastore/src/generated.rs b/rusoto/services/mediastore/src/generated.rs index 834bdaaebe9..2950a99885e 100644 --- a/rusoto/services/mediastore/src/generated.rs +++ b/rusoto/services/mediastore/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

 /// <p>This section describes operations that you can perform on an AWS Elemental MediaStore container.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Container {
     /// <p>The Amazon Resource Name (ARN) of the container. The ARN has the following format:</p> <p>arn:aws:<region>:<account that owns this container>:container/<name of container></p> <p>For example: arn:aws:mediastore:us-west-2:111122223333:container/movies</p>
     #[serde(rename = "ARN")]
@@ -82,10 +81,14 @@ pub struct CreateContainerInput {
     /// <p>The name for the container. The name must be from 1 to 255 characters. Container names must be unique to your AWS account within a specific region. As an example, you could create a container named movies in every region, as long as you don’t have an existing container with that name.</p>
     #[serde(rename = "ContainerName")]
     pub container_name: String,
+    /// <p>An array of key:value pairs that you define. These values can be anything that you want. Typically, the tag key represents a category (such as "environment") and the tag value represents a specific value within that category (such as "test," "development," or "production"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.</p>
+    #[serde(rename = "Tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<Vec<Tag>>,
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateContainerOutput {
     /// <p>ContainerARN: The Amazon Resource Name (ARN) of the newly created container. The ARN has the following format: arn:aws:<region>:<account that owns this container>:container/<name of container>. For example: arn:aws:mediastore:us-west-2:111122223333:container/movies</p> <p>ContainerName: The container name as specified in the request.</p> <p>CreationTime: Unix time stamp.</p> <p>Status: The status of container creation or deletion. The status is one of the following: CREATING, ACTIVE, or DELETING. While the service is creating the container, the status is CREATING. When an endpoint is available, the status changes to ACTIVE.</p> <p>The return value does not include the container's endpoint. To make downstream requests, you must obtain this value by using DescribeContainer or ListContainers.</p>
     #[serde(rename = "Container")]
@@ -100,7 +103,7 @@ pub struct DeleteContainerInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteContainerOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -111,7 +114,7 @@ pub struct DeleteContainerPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteContainerPolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -122,7 +125,7 @@ pub struct DeleteCorsPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteCorsPolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -133,7 +136,7 @@ pub struct DeleteLifecyclePolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteLifecyclePolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -145,7 +148,7 @@ pub struct DescribeContainerInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeContainerOutput {
     /// <p>The name of the queried container.</p>
     #[serde(rename = "Container")]
@@ -161,7 +164,7 @@ pub struct GetContainerPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetContainerPolicyOutput {
     /// <p>The contents of the access policy.</p>
     #[serde(rename = "Policy")]
@@ -176,7 +179,7 @@ pub struct GetCorsPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetCorsPolicyOutput {
     /// <p>The CORS policy assigned to the container.</p>
     #[serde(rename = "CorsPolicy")]
@@ -191,7 +194,7 @@ pub struct GetLifecyclePolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetLifecyclePolicyOutput {
     /// <p>The object lifecycle policy that is assigned to the container.</p>
     #[serde(rename = "LifecyclePolicy")]
@@ -211,7 +214,7 @@ pub struct ListContainersInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListContainersOutput {
     /// <p>The names of the containers.</p>
     #[serde(rename = "Containers")]
@@ -222,6 +225,22 @@ pub struct ListContainersOutput {
     pub next_token: Option<String>,
 }

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListTagsForResourceInput {
+    /// <p>The Amazon Resource Name (ARN) for the container.</p>
+    #[serde(rename = "Resource")]
+    pub resource: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListTagsForResourceOutput {
+    /// <p>An array of key:value pairs that are assigned to the container.</p>
+    #[serde(rename = "Tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<Vec<Tag>>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct PutContainerPolicyInput {
     /// <p>The name of the container.</p>
@@ -233,7 +252,7 @@ pub struct PutContainerPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutContainerPolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -247,7 +266,7 @@ pub struct PutCorsPolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutCorsPolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -261,7 +280,7 @@ pub struct PutLifecyclePolicyInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutLifecyclePolicyOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -272,7 +291,7 @@ pub struct StartAccessLoggingInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartAccessLoggingOutput {}

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -283,9 +302,50 @@ pub struct StopAccessLoggingInput {
 }

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StopAccessLoggingOutput {}
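`CreateContainerInput` now accepts up to 50 tags at creation time, using the `Tag` type added just below. A sketch with illustrative key/value pairs and a hypothetical container name:

```rust
use rusoto_mediastore::{CreateContainerInput, Tag};

fn tagged_container_request() -> CreateContainerInput {
    CreateContainerInput {
        container_name: "movies".to_string(), // hypothetical container name
        tags: Some(vec![Tag {
            key: Some("environment".to_string()),
            value: Some("production".to_string()),
        }]),
    }
}
```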

+/// <p>A collection of tags associated with a container. Each tag consists of a key:value pair, which can be anything you define. Typically, the tag key represents a category (such as "environment") and the tag value represents a specific value within that category (such as "test," "development," or "production"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Tag {
+    /// <p>Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as "customer." Tag keys are case-sensitive.</p>
+    #[serde(rename = "Key")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub key: Option<String>,
+    /// <p>Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as "companyA" or "companyB." Tag values are case-sensitive.</p>
+    #[serde(rename = "Value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct TagResourceInput {
+    /// <p>The Amazon Resource Name (ARN) for the container.</p>
+    #[serde(rename = "Resource")]
+    pub resource: String,
+    /// <p>An array of key:value pairs that you want to add to the container. You need to specify only the tags that you want to add or update. For example, suppose a container already has two tags (customer:CompanyA and priority:High). You want to change the priority tag and also add a third tag (type:Contract). For TagResource, you specify the following tags: priority:Medium, type:Contract. The result is that your container has three tags: customer:CompanyA, priority:Medium, and type:Contract.</p>
+    #[serde(rename = "Tags")]
+    pub tags: Vec<Tag>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct TagResourceOutput {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UntagResourceInput {
+    /// <p>The Amazon Resource Name (ARN) for the container.</p>
+    #[serde(rename = "Resource")]
+    pub resource: String,
+    /// <p>A comma-separated list of keys for tags that you want to remove from the container. For example, if your container has two tags (customer:CompanyA and priority:High) and you want to remove one of the tags (priority:High), you specify the key for the tag that you want to remove (priority).</p>
+    #[serde(rename = "TagKeys")]
+    pub tag_keys: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UntagResourceOutput {}

The container that you specified in the request already exists or is being updated.

+ ContainerInUse(String), + ///

The container that you specified in the request does not exist.

+ ContainerNotFound(String), + ///

The service is temporarily unavailable.

+ InternalServerError(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ContainerInUseException" => { + return RusotoError::Service(ListTagsForResourceError::ContainerInUse(err.msg)) + } + "ContainerNotFoundException" => { + return RusotoError::Service(ListTagsForResourceError::ContainerNotFound( + err.msg, + )) + } + "InternalServerError" => { + return RusotoError::Service(ListTagsForResourceError::InternalServerError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::ContainerInUse(ref cause) => cause, + ListTagsForResourceError::ContainerNotFound(ref cause) => cause, + ListTagsForResourceError::InternalServerError(ref cause) => cause, + } + } +} /// Errors returned by PutContainerPolicy #[derive(Debug, PartialEq)] pub enum PutContainerPolicyError { @@ -1021,6 +1130,96 @@ impl Error for StopAccessLoggingError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

The container that you specified in the request already exists or is being updated.

+ ContainerInUse(String), + ///

The container that you specified in the request does not exist.

+ ContainerNotFound(String), + ///

The service is temporarily unavailable.

+ InternalServerError(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ContainerInUseException" => { + return RusotoError::Service(TagResourceError::ContainerInUse(err.msg)) + } + "ContainerNotFoundException" => { + return RusotoError::Service(TagResourceError::ContainerNotFound(err.msg)) + } + "InternalServerError" => { + return RusotoError::Service(TagResourceError::InternalServerError(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::ContainerInUse(ref cause) => cause, + TagResourceError::ContainerNotFound(ref cause) => cause, + TagResourceError::InternalServerError(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

The container that you specified in the request already exists or is being updated.

+ ContainerInUse(String), + ///

The container that you specified in the request does not exist.

+ ContainerNotFound(String), + ///

The service is temporarily unavailable.

+ InternalServerError(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "ContainerInUseException" => { + return RusotoError::Service(UntagResourceError::ContainerInUse(err.msg)) + } + "ContainerNotFoundException" => { + return RusotoError::Service(UntagResourceError::ContainerNotFound(err.msg)) + } + "InternalServerError" => { + return RusotoError::Service(UntagResourceError::InternalServerError(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::ContainerInUse(ref cause) => cause, + UntagResourceError::ContainerNotFound(ref cause) => cause, + UntagResourceError::InternalServerError(ref cause) => cause, + } + } +} /// Trait representing the capabilities of the MediaStore API. MediaStore clients implement this trait. pub trait MediaStore { ///

Creates a storage container to hold objects. A container is similar to a bucket in the Amazon S3 service.

@@ -1083,6 +1282,12 @@ pub trait MediaStore { input: ListContainersInput, ) -> RusotoFuture<ListContainersOutput, ListContainersError>; + ///

Returns a list of the tags assigned to the specified container.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceInput, + ) -> RusotoFuture<ListTagsForResourceOutput, ListTagsForResourceError>; + ///
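// A minimal calling sketch, assuming a handle implementing this trait, a
// placeholder container ARN, and the blocking `.sync()` helper on RusotoFuture:
//
//     let output = client
//         .list_tags_for_resource(ListTagsForResourceInput {
//             resource: "arn:aws:mediastore:us-east-1:123456789012:container/example".to_string(),
//         })
//         .sync()?;
//     let tags = output.tags.unwrap_or_default();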

Creates an access policy for the specified container to restrict the users and clients that can access it. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide.

For this release of the REST API, you can create only one policy for a container. If you enter PutContainerPolicy twice, the second command modifies the existing policy.

fn put_container_policy( &self, @@ -1112,6 +1317,18 @@ &self, input: StopAccessLoggingInput, ) -> RusotoFuture<StopAccessLoggingOutput, StopAccessLoggingError>; + + ///

Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be "customer" and the tag value might be "companyA." You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

+ fn tag_resource( + &self, + input: TagResourceInput, + ) -> RusotoFuture<TagResourceOutput, TagResourceError>; + + ///
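// Likewise for tagging, a hedged sketch mirroring the priority:Medium example
// in the documentation above (the ARN is a placeholder):
//
//     client
//         .tag_resource(TagResourceInput {
//             resource: "arn:aws:mediastore:us-east-1:123456789012:container/example".to_string(),
//             tags: vec![Tag {
//                 key: Some("priority".to_string()),
//                 value: Some("Medium".to_string()),
//             }],
//         })
//         .sync()?;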

Removes tags from the specified container. You can specify one or more tags to remove.

+ fn untag_resource( + &self, + input: UntagResourceInput, + ) -> RusotoFuture<UntagResourceOutput, UntagResourceError>; } /// A client for the MediaStore API. #[derive(Clone)] @@ -1125,10 +1342,7 @@ impl MediaStoreClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MediaStoreClient { - MediaStoreClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1142,10 +1356,14 @@ impl MediaStoreClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MediaStoreClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MediaStoreClient { + MediaStoreClient { client, region } } } @@ -1438,6 +1656,34 @@ impl MediaStore for MediaStoreClient { }) } + ///
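// The constructor refactor above routes `new` and `new_with` through the new
// `new_with_client`, so a preconfigured `Client` can be shared across service
// clients. A usage sketch, assuming `Region` is in scope:
//
//     let mediastore = MediaStoreClient::new_with_client(Client::shared(), Region::UsEast1);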

Returns a list of the tags assigned to the specified container.

+ fn list_tags_for_resource( + &self, + input: ListTagsForResourceInput, + ) -> RusotoFuture<ListTagsForResourceOutput, ListTagsForResourceError> { + let mut request = SignedRequest::new("POST", "mediastore", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "MediaStore_20170901.ListTagsForResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::<ListTagsForResourceOutput, _>() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListTagsForResourceError::from_response(response)) + }), + ) + } + }) + } + ///

Creates an access policy for the specified container to restrict the users and clients that can access it. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide.

For this release of the REST API, you can create only one policy for a container. If you enter PutContainerPolicy twice, the second command modifies the existing policy.

fn put_container_policy( &self, @@ -1582,4 +1828,62 @@ impl MediaStore for MediaStoreClient { } }) } + + ///

Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be "customer" and the tag value might be "companyA." You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

+ fn tag_resource( + &self, + input: TagResourceInput, + ) -> RusotoFuture<TagResourceOutput, TagResourceError> { + let mut request = SignedRequest::new("POST", "mediastore", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "MediaStore_20170901.TagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::<TagResourceOutput, _>() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(TagResourceError::from_response(response))), + ) + } + }) + } + + ///

Removes tags from the specified container. You can specify one or more tags to remove.

+ fn untag_resource( + &self, + input: UntagResourceInput, + ) -> RusotoFuture<UntagResourceOutput, UntagResourceError> { + let mut request = SignedRequest::new("POST", "mediastore", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "MediaStore_20170901.UntagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::<UntagResourceOutput, _>() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UntagResourceError::from_response(response))), + ) + } + }) + } } diff --git a/rusoto/services/mediatailor/Cargo.toml b/rusoto/services/mediatailor/Cargo.toml index ac5b6f51a8e..6038c5d53b1 100644 --- a/rusoto/services/mediatailor/Cargo.toml +++ b/rusoto/services/mediatailor/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mediatailor" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mediatailor/README.md b/rusoto/services/mediatailor/README.md index b2d38934354..1c349929722 100644 --- a/rusoto/services/mediatailor/README.md +++ b/rusoto/services/mediatailor/README.md @@ -23,9 +23,16 @@ To use `rusoto_mediatailor` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_mediatailor = "0.40.0" +rusoto_mediatailor = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mediatailor/src/custom/mod.rs b/rusoto/services/mediatailor/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mediatailor/src/custom/mod.rs +++ b/rusoto/services/mediatailor/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mediatailor/src/generated.rs b/rusoto/services/mediatailor/src/generated.rs index 38cfa8c2eed..490edc609a0 100644 --- a/rusoto/services/mediatailor/src/generated.rs +++ b/rusoto/services/mediatailor/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes.
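// The `serialize_structs` feature introduced in the Cargo.toml and README
// hunks above opts output structs into `derive(Serialize)`. A hedged sketch of
// what that enables once the feature is turned on for the dependency:
//
//     // [dependencies] rusoto_mediatailor = { version = "0.41.0", features = ["serialize_structs"] }
//     let json = serde_json::to_string(&response)?; // e.g. a GetPlaybackConfigurationResponse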
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -40,7 +39,7 @@ pub struct CdnConfiguration { ///

The configuration for DASH content.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DashConfiguration { ///

The URL generated by MediaTailor to initiate a playback session. The session uses server-side reporting. This setting is ignored in PUT operations.

#[serde(rename = "ManifestEndpointPrefix")] @@ -77,7 +76,7 @@ pub struct DeletePlaybackConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePlaybackConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -88,7 +87,7 @@ pub struct GetPlaybackConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPlaybackConfigurationResponse { ///

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

#[serde(rename = "AdDecisionServerUrl")] @@ -142,7 +141,7 @@ pub struct GetPlaybackConfigurationResponse { ///

The configuration for HLS content.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HlsConfiguration { ///

The URL that is used to initiate a playback session for devices that support Apple HLS. The session uses server-side reporting.

#[serde(rename = "ManifestEndpointPrefix")] @@ -163,7 +162,7 @@ pub struct ListPlaybackConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPlaybackConfigurationsResponse { ///

Array of playback configurations. This might be all the available configurations or a subset, depending on the settings that you provide and the total number of configurations stored.

#[serde(rename = "Items")] @@ -183,7 +182,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

A comma-separated list of tag key:value pairs. For example: /// { @@ -198,7 +197,7 @@ pub struct ListTagsForResourceResponse { ///

The AWSMediaTailor configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PlaybackConfiguration { ///

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

#[serde(rename = "AdDecisionServerUrl")] @@ -287,7 +286,7 @@ pub struct PutPlaybackConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutPlaybackConfigurationResponse { ///

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

#[serde(rename = "AdDecisionServerUrl")] @@ -617,10 +616,7 @@ impl MediaTailorClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MediaTailorClient { - MediaTailorClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -634,10 +630,14 @@ impl MediaTailorClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MediaTailorClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MediaTailorClient { + MediaTailorClient { client, region } } } diff --git a/rusoto/services/meteringmarketplace/Cargo.toml b/rusoto/services/meteringmarketplace/Cargo.toml index 9b194850486..da70debcdef 100644 --- a/rusoto/services/meteringmarketplace/Cargo.toml +++ b/rusoto/services/meteringmarketplace/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_meteringmarketplace" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/meteringmarketplace/README.md b/rusoto/services/meteringmarketplace/README.md index 86222369dcf..9afa4256c60 100644 --- a/rusoto/services/meteringmarketplace/README.md +++ b/rusoto/services/meteringmarketplace/README.md @@ -23,9 +23,16 @@ To use `rusoto_meteringmarketplace` in your application, add it as a dependency ```toml [dependencies] -rusoto_meteringmarketplace = "0.40.0" +rusoto_meteringmarketplace = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/meteringmarketplace/src/custom/mod.rs b/rusoto/services/meteringmarketplace/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/meteringmarketplace/src/custom/mod.rs +++ b/rusoto/services/meteringmarketplace/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/meteringmarketplace/src/generated.rs b/rusoto/services/meteringmarketplace/src/generated.rs index b2e5294c2b4..c800e71c723 100644 --- a/rusoto/services/meteringmarketplace/src/generated.rs +++ b/rusoto/services/meteringmarketplace/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -37,7 +36,7 @@ pub struct BatchMeterUsageRequest { ///

Contains the UsageRecords processed by BatchMeterUsage and any records that have failed due to transient error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchMeterUsageResult { ///

Contains all UsageRecords processed by BatchMeterUsage. These records were either honored by AWS Marketplace Metering Service or were invalid.

#[serde(rename = "Results")] @@ -71,7 +70,7 @@ pub struct MeterUsageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MeterUsageResult { ///

Metering record id.

#[serde(rename = "MeteringRecordId")] @@ -94,7 +93,7 @@ pub struct RegisterUsageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterUsageResult { ///

(Optional) Only included when the public key version has expired.

#[serde(rename = "PublicKeyRotationTimestamp")] @@ -116,7 +115,7 @@ pub struct ResolveCustomerRequest { ///

The result of the ResolveCustomer operation. Contains the CustomerIdentifier and product code.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResolveCustomerResult { ///

The CustomerIdentifier is used to identify an individual customer in your application. Calls to BatchMeterUsage require CustomerIdentifiers for each UsageRecord.

#[serde(rename = "CustomerIdentifier")] @@ -148,7 +147,7 @@ pub struct UsageRecord { ///

A UsageRecordResult indicates the status of a given UsageRecord processed by BatchMeterUsage.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsageRecordResult { ///

The MeteringRecordId is a unique identifier for this metering event.

#[serde(rename = "MeteringRecordId")] @@ -484,10 +483,7 @@ impl MarketplaceMeteringClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MarketplaceMeteringClient { - MarketplaceMeteringClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -501,10 +497,14 @@ impl MarketplaceMeteringClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MarketplaceMeteringClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MarketplaceMeteringClient { + MarketplaceMeteringClient { client, region } } } diff --git a/rusoto/services/mgh/Cargo.toml b/rusoto/services/mgh/Cargo.toml index c01f03343f9..cae02ea33a2 100644 --- a/rusoto/services/mgh/Cargo.toml +++ b/rusoto/services/mgh/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mgh" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mgh/README.md b/rusoto/services/mgh/README.md index 4666f1ff1c3..2ff9eef1110 100644 --- a/rusoto/services/mgh/README.md +++ b/rusoto/services/mgh/README.md @@ -23,9 +23,16 @@ To use `rusoto_mgh` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_mgh = "0.40.0" +rusoto_mgh = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mgh/src/custom/mod.rs b/rusoto/services/mgh/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mgh/src/custom/mod.rs +++ b/rusoto/services/mgh/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mgh/src/generated.rs b/rusoto/services/mgh/src/generated.rs index 699ddacc835..daf2e3b8813 100644 --- a/rusoto/services/mgh/src/generated.rs +++ b/rusoto/services/mgh/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -42,7 +41,7 @@ pub struct AssociateCreatedArtifactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateCreatedArtifactResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -63,7 +62,7 @@ pub struct AssociateDiscoveredResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDiscoveredResourceResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -78,7 +77,7 @@ pub struct CreateProgressUpdateStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProgressUpdateStreamResult {} ///

An ARN of the AWS cloud resource target receiving the migration (e.g., AMI, EC2 instance, RDS instance, etc.).

@@ -105,7 +104,7 @@ pub struct DeleteProgressUpdateStreamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProgressUpdateStreamResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -116,7 +115,7 @@ pub struct DescribeApplicationStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeApplicationStateResult { ///

Status of the application - Not Started, In-Progress, Complete.

#[serde(rename = "ApplicationStatus")] @@ -139,7 +138,7 @@ pub struct DescribeMigrationTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMigrationTaskResult { ///

Object encapsulating information about the migration task.

#[serde(rename = "MigrationTask")] @@ -165,7 +164,7 @@ pub struct DisassociateCreatedArtifactRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateCreatedArtifactResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -186,7 +185,7 @@ pub struct DisassociateDiscoveredResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDiscoveredResourceResult {} ///

Object representing the on-premises resource being migrated.

@@ -216,7 +215,7 @@ pub struct ImportMigrationTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportMigrationTaskResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -238,7 +237,7 @@ pub struct ListCreatedArtifactsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCreatedArtifactsResult { ///

List of created artifacts up to the maximum number of results specified in the request.

#[serde(rename = "CreatedArtifactList")] @@ -269,7 +268,7 @@ pub struct ListDiscoveredResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDiscoveredResourcesResult { ///

Returned list of discovered resources associated with the given MigrationTask.

#[serde(rename = "DiscoveredResourceList")] @@ -298,7 +297,7 @@ pub struct ListMigrationTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListMigrationTasksResult { ///

Lists the migration task's summary which includes: MigrationTaskName, ProgressPercent, ProgressUpdateStream, Status, and the UpdateDateTime for each task.

#[serde(rename = "MigrationTaskSummaryList")] @@ -323,7 +322,7 @@ pub struct ListProgressUpdateStreamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProgressUpdateStreamsResult { ///

If there are more streams created than the max result, return the next token to be passed to the next call as a bookmark of where to start from.

#[serde(rename = "NextToken")] @@ -337,7 +336,7 @@ pub struct ListProgressUpdateStreamsResult { ///

Represents a migration task in a migration tool.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MigrationTask { ///

Unique identifier that references the migration task.

#[serde(rename = "MigrationTaskName")] @@ -363,7 +362,7 @@ pub struct MigrationTask { ///

MigrationTaskSummary includes MigrationTaskName, ProgressPercent, ProgressUpdateStream, Status, and UpdateDateTime for each task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MigrationTaskSummary { ///

Unique identifier that references the migration task.

#[serde(rename = "MigrationTaskName")] @@ -406,7 +405,7 @@ pub struct NotifyApplicationStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotifyApplicationStateResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -433,12 +432,12 @@ pub struct NotifyMigrationTaskStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotifyMigrationTaskStateResult {} ///

Summary of the AWS resource used for access control that is implicitly linked to your AWS account.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProgressUpdateStreamSummary { ///

The name of the ProgressUpdateStream.

#[serde(rename = "ProgressUpdateStreamName")] @@ -464,7 +463,7 @@ pub struct PutResourceAttributesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutResourceAttributesResult {} ///

Attribute associated with a resource.

Note the corresponding format required per type listed below:

IPV4

x.x.x.x

where x is an integer in the range [0,255]

IPV6

y : y : y : y : y : y : y : y

where y is a hexadecimal between 0 and FFFF. [0, FFFF]

MAC_ADDRESS

^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$

FQDN

^[^<>{}\\/?,=\p{Cntrl}]{1,256}$
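// A hedged sketch of supplying one attribute in the IPV4 format listed above;
// the ResourceAttribute fields are elided from this hunk, so the field names
// and type string here are assumptions based on the generated struct:
//
//     let attr = ResourceAttribute {
//         attribute_type: "IPV4_ADDRESS".to_string(),
//         attribute_value: "10.0.0.12".to_string(),
//     };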

@@ -1830,10 +1829,7 @@ impl MigrationHubClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MigrationHubClient { - MigrationHubClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1847,10 +1843,14 @@ impl MigrationHubClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MigrationHubClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MigrationHubClient { + MigrationHubClient { client, region } } } diff --git a/rusoto/services/mobile/Cargo.toml b/rusoto/services/mobile/Cargo.toml index 19290a98816..d7e28898c44 100644 --- a/rusoto/services/mobile/Cargo.toml +++ b/rusoto/services/mobile/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mobile" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -21,14 +21,16 @@ serde = "1.0.2" serde_derive = "1.0.2" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mobile/README.md b/rusoto/services/mobile/README.md index 8145e7b91cc..4053bfa1d52 100644 --- a/rusoto/services/mobile/README.md +++ b/rusoto/services/mobile/README.md @@ -23,9 +23,16 @@ To use `rusoto_mobile` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_mobile = "0.40.0" +rusoto_mobile = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mobile/src/custom/mod.rs b/rusoto/services/mobile/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mobile/src/custom/mod.rs +++ b/rusoto/services/mobile/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mobile/src/generated.rs b/rusoto/services/mobile/src/generated.rs index 2a524c01d15..31b9946075f 100644 --- a/rusoto/services/mobile/src/generated.rs +++ b/rusoto/services/mobile/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; ///

The details of the bundle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BundleDetails { #[serde(rename = "availablePlatforms")] #[serde(skip_serializing_if = "Option::is_none")] @@ -76,7 +75,7 @@ pub struct CreateProjectRequest { ///

Result structure used in response to a request to create a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProjectResult { ///

Detailed information about the created AWS Mobile Hub project.

#[serde(rename = "details")] @@ -94,7 +93,7 @@ pub struct DeleteProjectRequest { ///

Result structure used in response to request to delete a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProjectResult { ///

Resources which were deleted.

#[serde(rename = "deletedResources")] @@ -116,7 +115,7 @@ pub struct DescribeBundleRequest { ///

Result structure contains the details of the bundle.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBundleResult { ///

The details of the bundle.

#[serde(rename = "details")] @@ -138,7 +137,7 @@ pub struct DescribeProjectRequest { ///

Result structure used for requests of project details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProjectResult { #[serde(rename = "details")] #[serde(skip_serializing_if = "Option::is_none")] @@ -163,7 +162,7 @@ pub struct ExportBundleRequest { ///

Result structure which contains link to download custom-generated SDK and tool packages used to integrate mobile web or app clients with backed AWS resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportBundleResult { ///

URL which contains the custom-generated SDK and tool packages used to integrate the client mobile app or web app with the AWS resources created by the AWS Mobile Hub project.

#[serde(rename = "downloadUrl")] @@ -181,7 +180,7 @@ pub struct ExportProjectRequest { ///

Result structure used for requests to export project configuration details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExportProjectResult { ///

URL which can be used to download the exported project configuration file(s).

#[serde(rename = "downloadUrl")] @@ -212,7 +211,7 @@ pub struct ListBundlesRequest { ///

Result structure contains a list of all available bundles with details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBundlesResult { ///

A list of bundles.

#[serde(rename = "bundleList")] @@ -239,7 +238,7 @@ pub struct ListProjectsRequest { ///

Result structure used for requests to list projects in AWS Mobile Hub.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProjectsResult { #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -251,7 +250,7 @@ pub struct ListProjectsResult { ///

Detailed information about an AWS Mobile Hub project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectDetails { ///

Website URL for this project in the AWS Mobile Hub console.

#[serde(rename = "consoleUrl")] @@ -284,7 +283,7 @@ pub struct ProjectDetails { ///

Summary information about an AWS Mobile Hub project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProjectSummary { ///

Name of the project.

#[serde(rename = "name")] @@ -298,7 +297,7 @@ pub struct ProjectSummary { ///

Information about an instance of an AWS resource associated with a project.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -336,7 +335,7 @@ pub struct UpdateProjectRequest { ///

Result structure used for requests to update a project configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProjectResult { ///

Detailed information about the updated AWS Mobile Hub project.

#[serde(rename = "details")] @@ -979,10 +978,7 @@ impl MobileClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MobileClient { - MobileClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -996,10 +992,14 @@ impl MobileClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MobileClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MobileClient { + MobileClient { client, region } } } diff --git a/rusoto/services/mq/Cargo.toml b/rusoto/services/mq/Cargo.toml index 3301ddae553..d27c2c576d5 100644 --- a/rusoto/services/mq/Cargo.toml +++ b/rusoto/services/mq/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mq" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mq/README.md b/rusoto/services/mq/README.md index d588fc0a896..8d71fa9116c 100644 --- a/rusoto/services/mq/README.md +++ b/rusoto/services/mq/README.md @@ -23,9 +23,16 @@ To use `rusoto_mq` in your application, add it as a dependency in your `Cargo.to ```toml [dependencies] -rusoto_mq = "0.40.0" +rusoto_mq = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mq/src/custom/mod.rs b/rusoto/services/mq/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mq/src/custom/mod.rs +++ b/rusoto/services/mq/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mq/src/generated.rs b/rusoto/services/mq/src/generated.rs index 07e800d5d46..19c662e3cb3 100644 --- a/rusoto/services/mq/src/generated.rs +++ b/rusoto/services/mq/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest; use serde_json; ///

Name of the availability zone.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AvailabilityZone { ///

Id for the availability zone.

#[serde(rename = "Name")] @@ -37,7 +36,7 @@ pub struct AvailabilityZone { ///

Types of broker engines.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerEngineType { ///

The type of broker engine.

#[serde(rename = "EngineType")] @@ -51,7 +50,7 @@ pub struct BrokerEngineType { ///

Returns information about all brokers.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerInstance { ///

The URL of the broker's ActiveMQ Web Console.

#[serde(rename = "ConsoleURL")] @@ -69,7 +68,7 @@ pub struct BrokerInstance { ///

Option for host instance type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerInstanceOption { ///

The list of available Availability Zones.

#[serde(rename = "AvailabilityZones")] @@ -91,7 +90,7 @@ pub struct BrokerInstanceOption { ///

The Amazon Resource Name (ARN) of the broker.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BrokerSummary { ///

The Amazon Resource Name (ARN) of the broker.

#[serde(rename = "BrokerArn")] @@ -125,7 +124,7 @@ pub struct BrokerSummary { ///

Returns information about all configurations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Configuration { ///

Required. The ARN of the configuration.

#[serde(rename = "Arn")] @@ -180,7 +179,7 @@ pub struct ConfigurationId { ///

Returns information about the specified configuration revision.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConfigurationRevision { ///

Required. The date and time of the configuration revision.

#[serde(rename = "Created")] @@ -198,7 +197,7 @@ pub struct ConfigurationRevision { ///

Broker configuration information

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Configurations { ///

The current configuration of the broker.

#[serde(rename = "Current")] @@ -237,6 +236,10 @@ pub struct CreateBrokerRequest { #[serde(rename = "DeploymentMode")] #[serde(skip_serializing_if = "Option::is_none")] pub deployment_mode: Option, + ///

Encryption options for the broker.

+ #[serde(rename = "EncryptionOptions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub encryption_options: Option, ///

Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ.

#[serde(rename = "EngineType")] #[serde(skip_serializing_if = "Option::is_none")] @@ -261,7 +264,7 @@ pub struct CreateBrokerRequest { #[serde(rename = "PubliclyAccessible")] #[serde(skip_serializing_if = "Option::is_none")] pub publicly_accessible: Option, - ///

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

+ ///

The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers.

#[serde(rename = "SecurityGroups")] #[serde(skip_serializing_if = "Option::is_none")] pub security_groups: Option>, @@ -280,7 +283,7 @@ pub struct CreateBrokerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateBrokerResponse { ///

The Amazon Resource Name (ARN) of the broker.

#[serde(rename = "BrokerArn")] @@ -314,7 +317,7 @@ pub struct CreateConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConfigurationResponse { ///

Required. The Amazon Resource Name (ARN) of the configuration.

#[serde(rename = "Arn")] @@ -374,7 +377,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -385,7 +388,7 @@ pub struct DeleteBrokerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBrokerResponse { ///

The unique ID that Amazon MQ generates for the broker.

#[serde(rename = "BrokerId")] @@ -414,7 +417,7 @@ pub struct DeleteUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -434,7 +437,7 @@ pub struct DescribeBrokerEngineTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBrokerEngineTypesResponse { ///

List of available engine types and versions.

#[serde(rename = "BrokerEngineTypes")] @@ -471,7 +474,7 @@ pub struct DescribeBrokerInstanceOptionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBrokerInstanceOptionsResponse { ///

List of available broker instance options.

#[serde(rename = "BrokerInstanceOptions")] @@ -495,7 +498,7 @@ pub struct DescribeBrokerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBrokerResponse { ///

Required. Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions. The automatic upgrades occur during the maintenance window of the broker or after a manual broker reboot.

#[serde(rename = "AutoMinorVersionUpgrade")] @@ -533,6 +536,10 @@ pub struct DescribeBrokerResponse { #[serde(rename = "DeploymentMode")] #[serde(skip_serializing_if = "Option::is_none")] pub deployment_mode: Option, + ///

Encryption options for the broker.

+ #[serde(rename = "EncryptionOptions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub encryption_options: Option, ///

Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ.

#[serde(rename = "EngineType")] #[serde(skip_serializing_if = "Option::is_none")] @@ -557,11 +564,15 @@ pub struct DescribeBrokerResponse { #[serde(rename = "PendingEngineVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub pending_engine_version: Option, + ///

The list of pending security groups to authorize connections to brokers.

+ #[serde(rename = "PendingSecurityGroups")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pending_security_groups: Option>, ///
<p>Required. Enables connections from applications outside of the VPC that hosts the broker's subnets.</p>
#[serde(rename = "PubliclyAccessible")] #[serde(skip_serializing_if = "Option::is_none")] pub publicly_accessible: Option, - ///
<p>Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.</p>
+ ///
<p>The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers.</p>
#[serde(rename = "SecurityGroups")] #[serde(skip_serializing_if = "Option::is_none")] pub security_groups: Option>, @@ -587,7 +598,7 @@ pub struct DescribeConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConfigurationResponse { ///
<p>Required. The ARN of the configuration.</p>
#[serde(rename = "Arn")] @@ -638,7 +649,7 @@ pub struct DescribeConfigurationRevisionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConfigurationRevisionResponse { ///
<p>Required. The unique ID that Amazon MQ generates for the configuration.</p>
#[serde(rename = "ConfigurationId")] @@ -669,7 +680,7 @@ pub struct DescribeUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserResponse { ///
<p>Required. The unique ID that Amazon MQ generates for the broker.</p>
#[serde(rename = "BrokerId")] @@ -693,9 +704,21 @@ pub struct DescribeUserResponse { pub username: Option, } +///
<p>Encryption options for the broker.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EncryptionOptions { + ///
<p>The customer master key (CMK) to use for the AWS Key Management Service (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.</p>
+ #[serde(rename = "KmsKeyId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub kms_key_id: Option, + ///
<p>Enables the use of an AWS owned CMK using AWS Key Management Service (KMS).</p>
+ #[serde(rename = "UseAwsOwnedKey")] + pub use_aws_owned_key: bool, +} + ///
<p>Id of the engine version.</p>
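The new EncryptionOptions struct above is plain data with one required flag (the EngineVersion struct continues below). A minimal construction sketch, assuming the generated types are re-exported from `rusoto_mq` and using a placeholder key ARN:

```rust
use rusoto_mq::EncryptionOptions;

fn main() {
    // Placeholder ARN for illustration only. Leaving kms_key_id as None
    // lets Amazon MQ fall back to a default CMK, per the docs above.
    let opts = EncryptionOptions {
        kms_key_id: Some("arn:aws:kms:us-east-1:123456789012:key/EXAMPLE".to_string()),
        use_aws_owned_key: false,
    };
    println!("{:?}", opts);
}
```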
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EngineVersion { ///
<p>Id for the version.</p>
#[serde(rename = "Name")] @@ -716,7 +739,7 @@ pub struct ListBrokersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBrokersResponse { ///
<p>A list of information about all brokers.</p>
#[serde(rename = "BrokerSummaries")] @@ -744,7 +767,7 @@ pub struct ListConfigurationRevisionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConfigurationRevisionsResponse { ///
<p>The unique ID that Amazon MQ generates for the configuration.</p>
#[serde(rename = "ConfigurationId")] @@ -777,7 +800,7 @@ pub struct ListConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConfigurationsResponse { ///
<p>The list of all revisions for the specified configuration.</p>
#[serde(rename = "Configurations")] @@ -801,7 +824,7 @@ pub struct ListTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsResponse { ///
<p>The key-value pair for the resource tag.</p>
#[serde(rename = "Tags")] @@ -825,7 +848,7 @@ pub struct ListUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersResponse { ///
<p>Required. The unique ID that Amazon MQ generates for the broker.</p>
#[serde(rename = "BrokerId")] @@ -860,7 +883,7 @@ pub struct Logs { ///
<p>The list of information about logs currently enabled and pending to be deployed for the specified broker.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LogsSummary { ///
<p>Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.</p>
#[serde(rename = "Audit")] @@ -886,7 +909,7 @@ pub struct LogsSummary { ///
<p>The list of information about logs to be enabled for the specified broker.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PendingLogs { ///
<p>Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.</p>
#[serde(rename = "Audit")] @@ -906,12 +929,12 @@ pub struct RebootBrokerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootBrokerResponse {} ///
<p>Returns information about the XML element or attribute that was sanitized in the configuration.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SanitizationWarning { ///
<p>The name of the XML attribute that has been sanitized.</p>
#[serde(rename = "AttributeName")] @@ -949,10 +972,14 @@ pub struct UpdateBrokerRequest { #[serde(rename = "Logs")] #[serde(skip_serializing_if = "Option::is_none")] pub logs: Option, + ///
<p>The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers.</p>
+ #[serde(rename = "SecurityGroups")] + #[serde(skip_serializing_if = "Option::is_none")] + pub security_groups: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateBrokerResponse { ///
<p>The new value of automatic upgrades to new minor version for brokers.</p>
#[serde(rename = "AutoMinorVersionUpgrade")] @@ -974,6 +1001,10 @@ pub struct UpdateBrokerResponse { #[serde(rename = "Logs")] #[serde(skip_serializing_if = "Option::is_none")] pub logs: Option, + ///
<p>The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers.</p>
+ #[serde(rename = "SecurityGroups")] + #[serde(skip_serializing_if = "Option::is_none")] + pub security_groups: Option>, } ///
<p>Updates the specified configuration.</p>
@@ -993,7 +1024,7 @@ pub struct UpdateConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConfigurationResponse { ///
<p>Required. The Amazon Resource Name (ARN) of the configuration.</p>
#[serde(rename = "Arn")] @@ -1045,7 +1076,7 @@ pub struct UpdateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserResponse {} ///
<p>An ActiveMQ user associated with the broker.</p>
@@ -1071,7 +1102,7 @@ pub struct User { ///
<p>Returns information about the status of the changes pending for the ActiveMQ user.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserPendingChanges { ///
<p>Enables access to the ActiveMQ Web Console for the ActiveMQ user.</p>
#[serde(rename = "ConsoleAccess")] @@ -1089,7 +1120,7 @@ pub struct UserPendingChanges { ///
<p>Returns a list of all ActiveMQ users.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserSummary { ///
<p>The type of change pending for the ActiveMQ user.</p>
#[serde(rename = "PendingChange")] @@ -2421,10 +2452,7 @@ impl MQClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MQClient { - MQClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2438,10 +2466,14 @@ impl MQClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MQClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MQClient { + MQClient { client, region } } } diff --git a/rusoto/services/mturk/Cargo.toml b/rusoto/services/mturk/Cargo.toml index 332d1de0f9d..255d36b20a2 100644 --- a/rusoto/services/mturk/Cargo.toml +++ b/rusoto/services/mturk/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_mturk" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/mturk/README.md b/rusoto/services/mturk/README.md index 035201c8b08..c78fbc22654 100644 --- a/rusoto/services/mturk/README.md +++ b/rusoto/services/mturk/README.md @@ -23,9 +23,16 @@ To use `rusoto_mturk` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_mturk = "0.40.0" +rusoto_mturk = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/mturk/src/custom/mod.rs b/rusoto/services/mturk/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/mturk/src/custom/mod.rs +++ b/rusoto/services/mturk/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/mturk/src/generated.rs b/rusoto/services/mturk/src/generated.rs index 10e6bf12388..32228da8c57 100644 --- a/rusoto/services/mturk/src/generated.rs +++ b/rusoto/services/mturk/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
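The `serialize_structs` feature called out in the README above is additive: when it is enabled, output structs get `derive(Serialize)` outside of test builds as well. A hedged sketch of what that permits, assuming `serde_json` is also a dependency of the consuming crate:

```rust
use rusoto_mturk::GetAccountBalanceResponse;

fn main() {
    // Compiles only with `features = ["serialize_structs"]` on rusoto_mturk;
    // a default-constructed response stands in for a real API result here.
    let response = GetAccountBalanceResponse::default();
    println!("{}", serde_json::to_string(&response).unwrap());
}
```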
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -36,7 +35,7 @@ pub struct AcceptQualificationRequestRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcceptQualificationRequestResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -55,12 +54,12 @@ pub struct ApproveAssignmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ApproveAssignmentResponse {} ///
<p>The Assignment data structure represents a single assignment of a HIT to a Worker. The assignment tracks the Worker's efforts to complete the HIT, and contains the results for later retrieval.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Assignment { ///
<p>The date and time the Worker accepted the assignment.</p>
#[serde(rename = "AcceptTime")] @@ -131,12 +130,12 @@ pub struct AssociateQualificationWithWorkerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateQualificationWithWorkerResponse {} ///
<p>An object representing a Bonus payment paid to a Worker.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BonusPayment { ///
<p>The ID of the assignment associated with this bonus payment.</p>
#[serde(rename = "AssignmentId")] @@ -174,7 +173,7 @@ pub struct CreateAdditionalAssignmentsForHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAdditionalAssignmentsForHITResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -241,7 +240,7 @@ pub struct CreateHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHITResponse { ///
<p>Contains the newly created HIT data. For a description of the HIT data structure as it appears in responses, see the HIT Data Structure documentation.</p>
#[serde(rename = "HIT")] @@ -278,7 +277,7 @@ pub struct CreateHITTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHITTypeResponse { ///
<p>The ID of the newly registered HIT type.</p>
#[serde(rename = "HITTypeId")] @@ -329,7 +328,7 @@ pub struct CreateHITWithHITTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHITWithHITTypeResponse { ///
<p>Contains the newly created HIT data. For a description of the HIT data structure as it appears in responses, see the HIT Data Structure documentation.</p>
#[serde(rename = "HIT")] @@ -379,7 +378,7 @@ pub struct CreateQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateQualificationTypeResponse { ///
<p>The created Qualification type, returned as a QualificationType data structure.</p>
#[serde(rename = "QualificationType")] @@ -398,7 +397,7 @@ pub struct CreateWorkerBlockRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWorkerBlockResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -409,7 +408,7 @@ pub struct DeleteHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteHITResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -420,7 +419,7 @@ pub struct DeleteQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteQualificationTypeResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -435,7 +434,7 @@ pub struct DeleteWorkerBlockRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWorkerBlockResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -453,14 +452,14 @@ pub struct DisassociateQualificationFromWorkerRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateQualificationFromWorkerResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetAccountBalanceRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAccountBalanceResponse { #[serde(rename = "AvailableBalance")] #[serde(skip_serializing_if = "Option::is_none")] @@ -478,7 +477,7 @@ pub struct GetAssignmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAssignmentResponse { ///
<p>The assignment. The response includes one Assignment element.</p>
#[serde(rename = "Assignment")] @@ -501,7 +500,7 @@ pub struct GetFileUploadURLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFileUploadURLResponse { ///
<p>A temporary URL for the file that the Worker uploaded for the answer.</p>
#[serde(rename = "FileUploadURL")] @@ -517,7 +516,7 @@ pub struct GetHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetHITResponse { ///
<p>Contains the requested HIT data.</p>
#[serde(rename = "HIT")] @@ -536,7 +535,7 @@ pub struct GetQualificationScoreRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQualificationScoreResponse { ///
<p>The Qualification data structure of the Qualification assigned to a user, including the Qualification type and the value (score).</p>
#[serde(rename = "Qualification")] @@ -552,7 +551,7 @@ pub struct GetQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetQualificationTypeResponse { ///
<p>The returned Qualification Type</p>
#[serde(rename = "QualificationType")] @@ -562,7 +561,7 @@ pub struct GetQualificationTypeResponse { ///
<p>The HIT data structure represents a single HIT, including all the information necessary for a Worker to accept and complete the HIT.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HIT { ///
<p>The length of time, in seconds, that a Worker has to complete the HIT after accepting it.</p>
#[serde(rename = "AssignmentDurationInSeconds")] @@ -679,7 +678,7 @@ pub struct ListAssignmentsForHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssignmentsForHITResponse { ///
<p>The collection of Assignment data structures returned by this call.</p>
#[serde(rename = "Assignments")] @@ -714,7 +713,7 @@ pub struct ListBonusPaymentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBonusPaymentsResponse { ///
<p>A successful request to the ListBonusPayments operation returns a list of BonusPayment objects.</p>
#[serde(rename = "BonusPayments")] @@ -745,7 +744,7 @@ pub struct ListHITsForQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHITsForQualificationTypeResponse { ///
<p>The list of HIT elements returned by the query.</p>
#[serde(rename = "HITs")] @@ -772,7 +771,7 @@ pub struct ListHITsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHITsResponse { ///
<p>The list of HIT elements returned by the query.</p>
#[serde(rename = "HITs")] @@ -803,7 +802,7 @@ pub struct ListQualificationRequestsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListQualificationRequestsResponse { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -841,7 +840,7 @@ pub struct ListQualificationTypesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListQualificationTypesResponse { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -884,7 +883,7 @@ pub struct ListReviewPolicyResultsForHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListReviewPolicyResultsForHITResponse { ///
<p>The name of the Assignment-level Review Policy. This contains only the PolicyName element.</p>
#[serde(rename = "AssignmentReviewPolicy")] @@ -932,7 +931,7 @@ pub struct ListReviewableHITsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListReviewableHITsResponse { ///
<p>The list of HIT elements returned by the query.</p>
#[serde(rename = "HITs")] @@ -959,7 +958,7 @@ pub struct ListWorkerBlocksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWorkerBlocksResponse { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -994,7 +993,7 @@ pub struct ListWorkersWithQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWorkersWithQualificationTypeResponse { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1040,7 +1039,7 @@ pub struct NotificationSpecification { ///
<p>When MTurk encounters an issue with notifying the Workers you specified, it returns this object with failure details.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotifyWorkersFailureStatus { ///
<p>Encoded value for the failure type.</p>
#[serde(rename = "NotifyWorkersFailureCode")] @@ -1070,7 +1069,7 @@ pub struct NotifyWorkersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotifyWorkersResponse { ///
<p>When MTurk sends notifications to the list of Workers, it returns any failures it encounters in this list of NotifyWorkersFailureStatus objects.</p>
#[serde(rename = "NotifyWorkersFailureStatuses")] @@ -1110,7 +1109,7 @@ pub struct PolicyParameter { ///
<p>The Qualification data structure represents a Qualification assigned to a user, including the Qualification type and the value (score).</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Qualification { ///
<p>The date and time the Qualification was granted to the Worker. If the Worker's Qualification was revoked, and then re-granted based on a new Qualification request, GrantTime is the date and time of the last call to the AcceptQualificationRequest operation.</p>
#[serde(rename = "GrantTime")] @@ -1139,7 +1138,7 @@ pub struct Qualification { ///
<p>The QualificationRequest data structure represents a request a Worker has made for a Qualification.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QualificationRequest { ///
<p>The Worker's answers for the Qualification type's test contained in a QuestionFormAnswers document, if the type has a test and the Worker has submitted answers. If the Worker does not provide any answers, Answer may be empty.</p>
#[serde(rename = "Answer")] @@ -1192,7 +1191,7 @@ pub struct QualificationRequirement { ///
<p>The QualificationType data structure represents a Qualification type, a description of a property of a Worker that must match the requirements of a HIT for the Worker to be able to accept the HIT. The type also describes how a Worker can obtain a Qualification of that type, such as through a Qualification test.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QualificationType { ///
<p>The answers to the Qualification test specified in the Test parameter.</p>
#[serde(rename = "AnswerKey")] @@ -1259,7 +1258,7 @@ pub struct RejectAssignmentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectAssignmentResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1274,12 +1273,12 @@ pub struct RejectQualificationRequestRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectQualificationRequestResponse {} ///
<p>Both the AssignmentReviewReport and the HITReviewReport elements contain the ReviewActionDetail data structure. This structure is returned multiple times for each action specified in the Review Policy.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReviewActionDetail { ///
<p>The unique identifier for the action.</p>
#[serde(rename = "ActionId")] @@ -1329,7 +1328,7 @@ pub struct ReviewPolicy { ///
<p>Contains both ReviewResult and ReviewAction elements for a particular HIT.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReviewReport { ///
<p>A list of ReviewAction objects for each action specified in the Review Policy.</p>
#[serde(rename = "ReviewActions")] @@ -1343,7 +1342,7 @@ pub struct ReviewReport { ///
<p>This data structure is returned multiple times for each result specified in the Review Policy.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReviewResultDetail { ///
<p>A unique identifier of the Review action result.</p>
#[serde(rename = "ActionId")] @@ -1392,7 +1391,7 @@ pub struct SendBonusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendBonusResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1406,7 +1405,7 @@ pub struct SendTestEventNotificationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendTestEventNotificationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1420,7 +1419,7 @@ pub struct UpdateExpirationForHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateExpirationForHITResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1435,7 +1434,7 @@ pub struct UpdateHITReviewStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateHITReviewStatusResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1449,7 +1448,7 @@ pub struct UpdateHITTypeOfHITRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateHITTypeOfHITResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1468,7 +1467,7 @@ pub struct UpdateNotificationSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNotificationSettingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1511,7 +1510,7 @@ pub struct UpdateQualificationTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateQualificationTypeResponse { ///
<p>Contains a QualificationType data structure.</p>
#[serde(rename = "QualificationType")] @@ -1521,7 +1520,7 @@ pub struct UpdateQualificationTypeResponse { ///
<p>The WorkerBlock data structure represents a Worker who has been blocked. It has two elements: the WorkerId and the Reason for the block.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkerBlock { ///
<p>A message explaining the reason the Worker was blocked.</p>
#[serde(rename = "Reason")] @@ -3371,10 +3370,7 @@ impl MechanicalTurkClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> MechanicalTurkClient { - MechanicalTurkClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3388,10 +3384,14 @@ impl MechanicalTurkClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - MechanicalTurkClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> MechanicalTurkClient { + MechanicalTurkClient { client, region } } } diff --git a/rusoto/services/neptune/Cargo.toml b/rusoto/services/neptune/Cargo.toml index a2ff557496d..14844f4cbb9 100644 --- a/rusoto/services/neptune/Cargo.toml +++ b/rusoto/services/neptune/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_neptune" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/neptune/README.md b/rusoto/services/neptune/README.md index e79bf2e1f6a..e0d2b5fa2e6 100644 --- a/rusoto/services/neptune/README.md +++ b/rusoto/services/neptune/README.md @@ -23,9 +23,16 @@ To use `rusoto_neptune` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_neptune = "0.40.0" +rusoto_neptune = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/neptune/src/custom/mod.rs b/rusoto/services/neptune/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/neptune/src/custom/mod.rs +++ b/rusoto/services/neptune/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/neptune/src/generated.rs b/rusoto/services/neptune/src/generated.rs index b0e69a3d67d..53ef9ff6308 100644 --- a/rusoto/services/neptune/src/generated.rs +++ b/rusoto/services/neptune/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
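The constructor refactor repeated across these hunks (here for `MechanicalTurkClient`, earlier for `MQClient`) routes `new` and `new_with` through a new `new_with_client` method, so callers can inject a pre-built `rusoto_core::Client`. A minimal usage sketch:

```rust
use rusoto_core::{Client, Region};
use rusoto_mturk::MechanicalTurkClient;

fn main() {
    // Client::shared() hands out one lazily initialized HTTP dispatcher and
    // credential provider; new_with_client lets callers reuse it explicitly.
    let shared = Client::shared();
    let _client = MechanicalTurkClient::new_with_client(shared, Region::UsEast1);
}
```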
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -368,7 +367,7 @@ impl CharacterSetDeserializer { }) } } -///
<p>The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.</p>
+///
<p>The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.</p> <p>The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs.</p>
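For the CloudwatchLogsExportConfiguration struct defined just below, a filled-in sketch; the "audit" log type is an assumption (the log type Neptune commonly exports), not something stated in this diff:

```rust
use rusoto_neptune::CloudwatchLogsExportConfiguration;

fn main() {
    // Log types absent from both lists keep their current export setting.
    let cfg = CloudwatchLogsExportConfiguration {
        enable_log_types: Some(vec!["audit".to_string()]),
        disable_log_types: None,
    };
    println!("{:?}", cfg);
}
```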
#[derive(Default, Debug, Clone, PartialEq)] pub struct CloudwatchLogsExportConfiguration { ///
<p>The list of log types to disable.</p>
@@ -636,6 +635,8 @@ pub struct CreateDBClusterMessage { pub db_subnet_group_name: Option, ///
<p>The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Neptune will not create a database in the DB cluster you are creating.</p>
pub database_name: Option, + ///
<p>The list of log types that need to be enabled for exporting to CloudWatch Logs.</p>
+ pub enable_cloudwatch_logs_exports: Option>, ///
<p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: false</p>
pub enable_iam_database_authentication: Option, ///
<p>The name of the database engine to be used for this DB cluster.</p> <p>Valid Values: neptune</p>
@@ -709,6 +710,13 @@ impl CreateDBClusterMessageSerializer { if let Some(ref field_value) = obj.database_name { params.put(&format!("{}{}", prefix, "DatabaseName"), &field_value); } + if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { + LogTypeListSerializer::serialize( + params, + &format!("{}{}", prefix, "EnableCloudwatchLogsExports"), + field_value, + ); + } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), @@ -1471,6 +1479,8 @@ pub struct DBCluster { pub db_cluster_resource_id: Option, ///
<p>Specifies the earliest time to which a database can be restored with point-in-time restore.</p>
pub earliest_restorable_time: Option, + ///
<p>A list of log types that this DB cluster is configured to export to CloudWatch Logs.</p>
+ pub enabled_cloudwatch_logs_exports: Option>, ///
<p>Specifies the connection endpoint for the primary instance of the DB cluster.</p>
pub endpoint: Option, ///
<p>Provides the name of the database engine to be used for this DB cluster.</p>
@@ -1603,6 +1613,14 @@ impl DBClusterDeserializer { stack, )?); } + "EnabledCloudwatchLogsExports" => { + obj.enabled_cloudwatch_logs_exports + .get_or_insert(vec![]) + .extend(LogTypeListDeserializer::deserialize( + "EnabledCloudwatchLogsExports", + stack, + )?); + } "Endpoint" => { obj.endpoint = Some(StringDeserializer::deserialize("Endpoint", stack)?); } @@ -5475,6 +5493,8 @@ pub struct ModifyDBClusterMessage { pub apply_immediately: Option, ///
<p>The number of days for which automated backups are retained. You must specify a minimum value of 1.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li><p>Must be a value from 1 to 35</p></li> </ul>
pub backup_retention_period: Option, + ///
<p>The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.</p>
+ pub cloudwatch_logs_export_configuration: Option, ///
<p>The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.</p> <p>Constraints:</p> <ul> <li><p>Must match the identifier of an existing DBCluster.</p></li> </ul>
pub db_cluster_identifier: String, ///
<p>The name of the DB cluster parameter group to use for the DB cluster.</p>
@@ -5517,6 +5537,13 @@ impl ModifyDBClusterMessageSerializer { &field_value, ); } + if let Some(ref field_value) = obj.cloudwatch_logs_export_configuration { + CloudwatchLogsExportConfigurationSerializer::serialize( + params, + &format!("{}{}", prefix, "CloudwatchLogsExportConfiguration"), + field_value, + ); + } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, @@ -7308,6 +7335,8 @@ pub struct RestoreDBClusterFromSnapshotMessage { pub db_subnet_group_name: Option, ///
<p>Not supported.</p>
pub database_name: Option, + ///
<p>The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.</p>
+ pub enable_cloudwatch_logs_exports: Option>, ///
<p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: false</p>
pub enable_iam_database_authentication: Option, ///
<p>The database engine to use for the new DB cluster.</p> <p>Default: The same as source</p> <p>Constraint: Must be compatible with the engine of the source</p>
@@ -7360,6 +7389,13 @@ impl RestoreDBClusterFromSnapshotMessageSerializer { if let Some(ref field_value) = obj.database_name { params.put(&format!("{}{}", prefix, "DatabaseName"), &field_value); } + if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { + LogTypeListSerializer::serialize( + params, + &format!("{}{}", prefix, "EnableCloudwatchLogsExports"), + field_value, + ); + } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), @@ -7432,6 +7468,8 @@ pub struct RestoreDBClusterToPointInTimeMessage { pub db_cluster_parameter_group_name: Option, ///
<p>The DB subnet group name to use for the new DB cluster.</p> <p>Constraints: If supplied, must match the name of an existing DBSubnetGroup.</p> <p>Example: mySubnetgroup</p>
pub db_subnet_group_name: Option, + ///
<p>The list of logs that the restored DB cluster is to export to CloudWatch Logs.</p>
+ pub enable_cloudwatch_logs_exports: Option>, ///
<p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: false</p>
pub enable_iam_database_authentication: Option, ///
<p>The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.</p> <p>The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.</p> <p>You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.</p> <p>If you do not specify a value for the KmsKeyId parameter, then the following will occur:</p> <ul> <li><p>If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.</p></li> <li><p>If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.</p></li> </ul> <p>If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.</p>
@@ -7442,7 +7480,7 @@ pub struct RestoreDBClusterToPointInTimeMessage { pub port: Option, ///
<p>The date and time to restore the DB cluster to.</p> <p>Valid Values: Value must be a time in Universal Coordinated Time (UTC) format</p> <p>Constraints:</p> <ul> <li><p>Must be before the latest restorable time for the DB instance</p></li> <li><p>Must be specified if UseLatestRestorableTime parameter is not provided</p></li> <li><p>Cannot be specified if UseLatestRestorableTime parameter is true</p></li> <li><p>Cannot be specified if RestoreType parameter is copy-on-write</p></li> </ul> <p>Example: 2015-03-07T23:45:00Z</p>
pub restore_to_time: Option, - ///
<p>The type of restore to be performed. The only type of restore currently supported is full-copy (the default).</p>
+ ///
<p>The type of restore to be performed. You can specify one of the following values:</p> <ul> <li><p>full-copy - The new DB cluster is restored as a full copy of the source DB cluster.</p></li> <li><p>copy-on-write - The new DB cluster is restored as a clone of the source DB cluster.</p></li> </ul> <p>If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.</p>
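Given the two RestoreType values documented above, a hedged request sketch with placeholder identifiers; RestoreToTime is left out because it cannot be combined with copy-on-write, so the latest restorable time is used instead:

```rust
use rusoto_neptune::RestoreDBClusterToPointInTimeMessage;

fn main() {
    // copy-on-write clones the source cluster rather than copying it in full.
    let msg = RestoreDBClusterToPointInTimeMessage {
        db_cluster_identifier: "my-cluster-clone".to_string(),
        source_db_cluster_identifier: "my-cluster".to_string(),
        restore_type: Some("copy-on-write".to_string()),
        use_latest_restorable_time: Some(true),
        ..Default::default()
    };
    println!("{:?}", msg);
}
```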
pub restore_type: Option, ///
<p>The identifier of the source DB cluster from which to restore.</p> <p>Constraints:</p> <ul> <li><p>Must match the identifier of an existing DBCluster.</p></li> </ul>
pub source_db_cluster_identifier: String, @@ -7476,6 +7514,13 @@ impl RestoreDBClusterToPointInTimeMessageSerializer { if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } + if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { + LogTypeListSerializer::serialize( + params, + &format!("{}{}", prefix, "EnableCloudwatchLogsExports"), + field_value, + ); + } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), @@ -12453,7 +12498,7 @@ pub trait Neptune { input: DeleteDBClusterSnapshotMessage, ) -> RusotoFuture; - ///
<p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.</p> <p>If you request a final DB snapshot, the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.</p> <p>Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.</p> <p>If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:</p> <ul> <li><p>The DB instance is the only instance in the DB cluster.</p></li> </ul>
+ ///
<p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.</p> <p>If you request a final DB snapshot, the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.</p> <p>Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.</p> <p>You can't delete a DB instance if it is the only instance in the DB cluster.</p>
fn delete_db_instance( &self, input: DeleteDBInstanceMessage, @@ -12717,10 +12762,7 @@ impl NeptuneClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> NeptuneClient { - NeptuneClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -12734,10 +12776,14 @@ impl NeptuneClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - NeptuneClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> NeptuneClient { + NeptuneClient { client, region } } } @@ -12802,7 +12848,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12878,7 +12924,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12926,7 +12972,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12974,7 +13020,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13024,7 +13070,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13075,7 +13121,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13123,7 +13169,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13171,7 +13217,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13222,7 +13268,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( 
response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13270,7 +13316,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13320,7 +13366,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13368,7 +13414,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13419,7 +13465,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13492,7 +13538,7 @@ impl Neptune for NeptuneClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13511,7 +13557,7 @@ impl Neptune for NeptuneClient { }) } - ///
<p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.</p> <p>If you request a final DB snapshot, the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.</p> <p>Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.</p> <p>If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:</p> <ul> <li><p>The DB instance is the only instance in the DB cluster.</p></li> </ul>
+ ///
<p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.</p> <p>If you request a final DB snapshot, the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.</p> <p>Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.</p> <p>You can't delete a DB instance if it is the only instance in the DB cluster.</p>
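A sketch of invoking the operation documented above, with placeholder identifiers; per the note on failure states, SkipFinalSnapshot must be true when the instance is failed or incompatible:

```rust
use rusoto_core::Region;
use rusoto_neptune::{DeleteDBInstanceMessage, Neptune, NeptuneClient};

fn main() {
    let client = NeptuneClient::new(Region::UsEast1);
    // Placeholder identifier. Skipping the final snapshot is the only way
    // to delete an instance stuck in a failed or incompatible state.
    let input = DeleteDBInstanceMessage {
        db_instance_identifier: "my-neptune-instance".to_string(),
        skip_final_snapshot: Some(true),
        ..Default::default()
    };
    // Returns a RusotoFuture (futures 0.1): drive it on an executor or
    // block with .sync().
    let _pending = client.delete_db_instance(input);
}
```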
     fn delete_db_instance(
         &self,
         input: DeleteDBInstanceMessage,
@@ -13543,7 +13589,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13643,7 +13689,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13693,7 +13739,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13741,7 +13787,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13794,7 +13840,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13842,7 +13888,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13893,7 +13939,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13941,7 +13987,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -13991,7 +14037,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14039,7 +14085,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14089,7 +14135,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14137,7 +14183,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14190,7 +14236,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14241,7 +14287,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14289,7 +14335,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14337,7 +14383,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14388,7 +14434,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14437,7 +14483,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14488,7 +14534,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14541,7 +14587,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14592,7 +14638,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14642,7 +14688,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14693,7 +14739,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14741,7 +14787,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14792,7 +14838,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14843,7 +14889,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14891,7 +14937,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14941,7 +14987,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -14989,7 +15035,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15037,7 +15083,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15088,7 +15134,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15166,7 +15212,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15239,7 +15285,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15287,7 +15333,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15335,7 +15381,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
@@ -15383,7 +15429,7 @@ impl Neptune for NeptuneClient {
                 } else {
                     let reader = EventReader::new_with_config(
                         response.body.as_ref(),
-                        ParserConfig::new().trim_whitespace(true),
+                        ParserConfig::new().trim_whitespace(false),
                     );
                     let mut stack = XmlResponse::new(reader.into_iter().peekable());
                     let _start_document = stack.next();
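The hunks above flip `trim_whitespace` from `true` to `false`, so leading and trailing whitespace inside XML character data now survives parsing of Neptune responses. A minimal sketch of the effect, assuming the `xml-rs` crate that provides `EventReader` and `ParserConfig` (the toy payload here is illustrative, not a real Neptune response):

```rust
use xml::reader::{EventReader, XmlEvent};
use xml::ParserConfig;

fn main() {
    let body: &[u8] = b"<Value>  padded  </Value>";
    // With trim_whitespace(false) the padding inside the text node is kept;
    // with trim_whitespace(true) the same event would carry just "padded".
    let reader =
        EventReader::new_with_config(body, ParserConfig::new().trim_whitespace(false));
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            assert_eq!(text, "  padded  ");
        }
    }
}
```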
diff --git a/rusoto/services/opsworks/Cargo.toml b/rusoto/services/opsworks/Cargo.toml
index d3dc6239cfc..8df53748482 100644
--- a/rusoto/services/opsworks/Cargo.toml
+++ b/rusoto/services/opsworks/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_opsworks"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/opsworks/README.md b/rusoto/services/opsworks/README.md
index a587981e372..bce9d0e17b8 100644
--- a/rusoto/services/opsworks/README.md
+++ b/rusoto/services/opsworks/README.md
@@ -23,9 +23,16 @@ To use `rusoto_opsworks` in your application, add it as a dependency in your `Cargo.toml`:
 ```toml
 [dependencies]
-rusoto_opsworks = "0.40.0"
+rusoto_opsworks = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/opsworks/src/custom/mod.rs b/rusoto/services/opsworks/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/opsworks/src/custom/mod.rs
+++ b/rusoto/services/opsworks/src/custom/mod.rs
@@ -0,0 +1 @@
+
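The new `serialize_structs` feature gates `derive(Serialize)` on the generated output structs. A consumer opts in from its own manifest; a hypothetical downstream `Cargo.toml` entry might look like:

```toml
[dependencies]
rusoto_opsworks = { version = "0.41.0", features = ["serialize_structs"] }
```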
diff --git a/rusoto/services/opsworks/src/generated.rs b/rusoto/services/opsworks/src/generated.rs
index 52d9777a524..645a8383437 100644
--- a/rusoto/services/opsworks/src/generated.rs
+++ b/rusoto/services/opsworks/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 /// <p>Describes an agent version.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AgentVersion {
     /// <p>The configuration manager.</p>
     #[serde(rename = "ConfigurationManager")]
@@ -40,7 +39,7 @@ pub struct AgentVersion {
 /// <p>A description of the app.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct App {
     /// <p>The app ID.</p>
     #[serde(rename = "AppId")]
@@ -74,7 +73,7 @@ pub struct App {
     #[serde(rename = "EnableSsl")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub enable_ssl: Option<bool>,
-    /// <p>An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.</p> <p>There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an "Environment: is too large (maximum is 10KB)" message.</p>
+    /// <p>An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.</p> <p>There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an "Environment: is too large (maximum is 20 KB)" message.</p>
     #[serde(rename = "Environment")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub environment: Option<Vec<EnvironmentVariable>>,
@@ -145,7 +144,7 @@
 /// <p>Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when AWS OpsWorks Stacks starts or stops load-based instances.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct AutoScalingThresholds {
-    /// <p>Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.</p> <p>To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks Stacks to Act on Your Behalf.</p>
+    /// <p>Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.</p> <p>To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks Stacks to Act on Your Behalf.</p>
     #[serde(rename = "Alarms")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub alarms: Option<Vec<String>>,
@@ -175,7 +174,7 @@ pub struct AutoScalingThresholds {
     pub thresholds_wait_time: Option<i64>,
 }
 
-/// <p>Describes a block device mapping. This data type maps directly to the Amazon EC2 BlockDeviceMapping data type.</p>
+/// <p>Describes a block device mapping. This data type maps directly to the Amazon EC2 BlockDeviceMapping data type.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct BlockDeviceMapping {
     /// <p>The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and AWS OpsWorks Stacks will provide the correct device name.</p>
@@ -190,7 +189,7 @@ pub struct BlockDeviceMapping {
     #[serde(rename = "NoDevice")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub no_device: Option<String>,
-    /// <p>The virtual device name. For more information, see BlockDeviceMapping.</p>
+    /// <p>The virtual device name. For more information, see BlockDeviceMapping.</p>
     #[serde(rename = "VirtualName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub virtual_name: Option<String>,
@@ -219,7 +218,7 @@ pub struct CloneStackRequest {
     #[serde(rename = "Attributes")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub attributes: Option<::std::collections::HashMap<String, String>>,
-    /// <p>A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.</p>
+    /// <p>A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.</p>
     #[serde(rename = "ChefConfiguration")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub chef_configuration: Option<ChefConfiguration>,
@@ -235,30 +234,31 @@ pub struct CloneStackRequest {
     #[serde(rename = "ConfigurationManager")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub configuration_manager: Option<StackConfigurationManager>,
+    /// <p>Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes.</p>
     #[serde(rename = "CustomCookbooksSource")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub custom_cookbooks_source: Option<Source>,
-    ///

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes

+ ///

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes

#[serde(rename = "CustomJson")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_json: Option, - ///

The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

+ ///

The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

#[serde(rename = "DefaultAvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub default_availability_zone: Option, - ///

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

+ ///

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

#[serde(rename = "DefaultInstanceProfileArn")] #[serde(skip_serializing_if = "Option::is_none")] pub default_instance_profile_arn: Option, - ///

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

+ ///

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

#[serde(rename = "DefaultOs")] #[serde(skip_serializing_if = "Option::is_none")] pub default_os: Option, - ///

The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

+ ///

The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

#[serde(rename = "DefaultRootDeviceType")] #[serde(skip_serializing_if = "Option::is_none")] pub default_root_device_type: Option, - ///

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

+ ///

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

#[serde(rename = "DefaultSshKeyName")] #[serde(skip_serializing_if = "Option::is_none")] pub default_ssh_key_name: Option, @@ -266,7 +266,7 @@ pub struct CloneStackRequest { #[serde(rename = "DefaultSubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub default_subnet_id: Option, - ///

The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

  • Baked_Goods

  • Clouds

  • Europe_Cities

  • Fruits

  • Greek_Deities

  • Legendary_creatures_from_Japan

  • Planets_and_Moons

  • Roman_Deities

  • Scottish_Islands

  • US_Cities

  • Wild_Cats

To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

+ ///

The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

  • Baked_Goods

  • Clouds

  • Europe_Cities

  • Fruits

  • Greek_Deities_and_Titans

  • Legendary_creatures_from_Japan

  • Planets_and_Moons

  • Roman_Deities

  • Scottish_Islands

  • US_Cities

  • Wild_Cats

To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

#[serde(rename = "HostnameTheme")] #[serde(skip_serializing_if = "Option::is_none")] pub hostname_theme: Option, @@ -274,11 +274,11 @@ pub struct CloneStackRequest { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, - ///

The cloned stack AWS region, such as "ap-northeast-2". For more information about AWS regions, see Regions and Endpoints.

+ ///

The cloned stack AWS region, such as "ap-northeast-2". For more information about AWS regions, see Regions and Endpoints.

#[serde(rename = "Region")] #[serde(skip_serializing_if = "Option::is_none")] pub region: Option, - ///

The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

+ ///

The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

#[serde(rename = "ServiceRoleArn")] pub service_role_arn: String, ///

The source stack ID.

@@ -288,11 +288,11 @@ pub struct CloneStackRequest { #[serde(rename = "UseCustomCookbooks")] #[serde(skip_serializing_if = "Option::is_none")] pub use_custom_cookbooks: Option, - ///

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.

  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

+ ///

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.

  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

#[serde(rename = "UseOpsworksSecurityGroups")] #[serde(skip_serializing_if = "Option::is_none")] pub use_opsworks_security_groups: Option, - ///

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms.

+ ///

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms.

#[serde(rename = "VpcId")] #[serde(skip_serializing_if = "Option::is_none")] pub vpc_id: Option, @@ -300,7 +300,7 @@ pub struct CloneStackRequest { ///

Contains the response to a CloneStack request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloneStackResult { ///

The cloned stack ID.

#[serde(rename = "StackId")] @@ -321,7 +321,7 @@ pub struct CloudWatchLogsConfiguration { pub log_streams: Option>, } -///

Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference.

+///

Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CloudWatchLogsLogStream { ///

Specifies the max number of log events in a batch, up to 10000. The default value is 1000.

@@ -336,7 +336,7 @@ pub struct CloudWatchLogsLogStream { #[serde(rename = "BufferDuration")] #[serde(skip_serializing_if = "Option::is_none")] pub buffer_duration: Option, - ///

Specifies how the time stamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference.

+ ///

Specifies how the time stamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference.

#[serde(rename = "DatetimeFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub datetime_format: Option, @@ -372,7 +372,7 @@ pub struct CloudWatchLogsLogStream { ///

Describes a command.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Command { ///

Date and time when the command was acknowledged.

#[serde(rename = "AcknowledgedAt")] @@ -442,7 +442,7 @@ pub struct CreateAppRequest { #[serde(rename = "EnableSsl")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_ssl: Option, - ///

An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instance. For more information, see Environment Variables.

There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 10KB)."

This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version.

+ ///

An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instance. For more information, see Environment Variables.

There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 20KB)."

If you have specified one or more environment variables, you cannot modify the stack's Chef version.

#[serde(rename = "Environment")] #[serde(skip_serializing_if = "Option::is_none")] pub environment: Option>, @@ -467,7 +467,7 @@ pub struct CreateAppRequest { ///

Contains the response to a CreateApp request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAppResult { ///

The app ID.

#[serde(rename = "AppId")] @@ -488,7 +488,7 @@ pub struct CreateDeploymentRequest { #[serde(rename = "Comment")] #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - ///

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

+ ///

A string that contains user-defined, custom JSON. You can use this parameter to override some corresponding default stack configuration JSON values. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes and Overriding Attributes With Custom JSON.

#[serde(rename = "CustomJson")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_json: Option, @@ -507,7 +507,7 @@ pub struct CreateDeploymentRequest { ///

Contains the response to a CreateDeployment request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDeploymentResult { ///

The deployment ID, which can be used with other requests to identify the deployment.

#[serde(rename = "DeploymentId")] @@ -521,11 +521,11 @@ pub struct CreateInstanceRequest { #[serde(rename = "AgentVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub agent_version: Option, - ///

A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs.

If you specify a custom AMI, you must set Os to Custom.

+ ///

A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs.

If you specify a custom AMI, you must set Os to Custom.

#[serde(rename = "AmiId")] #[serde(skip_serializing_if = "Option::is_none")] pub ami_id: Option, - ///

The instance architecture. The default option is x86_64. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

+ ///

The instance architecture. The default option is x86_64. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

#[serde(rename = "Architecture")] #[serde(skip_serializing_if = "Option::is_none")] pub architecture: Option, @@ -533,11 +533,11 @@ pub struct CreateInstanceRequest { #[serde(rename = "AutoScalingType")] #[serde(skip_serializing_if = "Option::is_none")] pub auto_scaling_type: Option, - ///

The instance Availability Zone. For more information, see Regions and Endpoints.

+ ///

The instance Availability Zone. For more information, see Regions and Endpoints.

#[serde(rename = "AvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub availability_zone: Option, - ///

An array of BlockDeviceMapping objects that specify the instance's block devices. For more information, see Block Device Mapping. Note that block device mappings are not supported for custom AMIs.

+ ///

An array of BlockDeviceMapping objects that specify the instance's block devices. For more information, see Block Device Mapping. Note that block device mappings are not supported for custom AMIs.

#[serde(rename = "BlockDeviceMappings")] #[serde(skip_serializing_if = "Option::is_none")] pub block_device_mappings: Option>, @@ -553,17 +553,17 @@ pub struct CreateInstanceRequest { #[serde(rename = "InstallUpdatesOnBoot")] #[serde(skip_serializing_if = "Option::is_none")] pub install_updates_on_boot: Option, - ///

The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

+ ///

The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

#[serde(rename = "InstanceType")] pub instance_type: String, ///

An array that contains the instance's layer IDs.

#[serde(rename = "LayerIds")] pub layer_ids: Vec, - ///

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information about the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating systems, see Operating SystemsFor more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

+ ///

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information about the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating systems, see Operating SystemsFor more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

#[serde(rename = "Os")] #[serde(skip_serializing_if = "Option::is_none")] pub os: Option, - ///

The instance root device type. For more information, see Storage for the Root Device.

+ ///

The instance root device type. For more information, see Storage for the Root Device.

#[serde(rename = "RootDeviceType")] #[serde(skip_serializing_if = "Option::is_none")] pub root_device_type: Option, @@ -578,7 +578,7 @@ pub struct CreateInstanceRequest { #[serde(rename = "SubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub subnet_id: Option, - ///

The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances.

+ ///

The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances.

#[serde(rename = "Tenancy")] #[serde(skip_serializing_if = "Option::is_none")] pub tenancy: Option, @@ -590,7 +590,7 @@ pub struct CreateInstanceRequest { ///

Contains the response to a CreateInstance request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateInstanceResult { ///

The instance ID.

#[serde(rename = "InstanceId")] @@ -604,11 +604,11 @@ pub struct CreateLayerRequest { #[serde(rename = "Attributes")] #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<::std::collections::HashMap>, - ///

Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

+ ///

Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

#[serde(rename = "AutoAssignElasticIps")] #[serde(skip_serializing_if = "Option::is_none")] pub auto_assign_elastic_ips: Option, - ///

For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

+ ///

For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

#[serde(rename = "AutoAssignPublicIps")] #[serde(skip_serializing_if = "Option::is_none")] pub auto_assign_public_ips: Option, @@ -616,11 +616,11 @@ pub struct CreateLayerRequest { #[serde(rename = "CloudWatchLogsConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub cloud_watch_logs_configuration: Option, - ///

The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

+ ///

The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

#[serde(rename = "CustomInstanceProfileArn")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_instance_profile_arn: Option, - ///

A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI.

+ ///

A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI.

#[serde(rename = "CustomJson")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_json: Option, @@ -651,7 +651,7 @@ pub struct CreateLayerRequest { #[serde(rename = "Packages")] #[serde(skip_serializing_if = "Option::is_none")] pub packages: Option>, - ///

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference.

+ ///

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference.

#[serde(rename = "Shortname")] pub shortname: String, ///

The layer stack ID.

@@ -672,7 +672,7 @@ pub struct CreateLayerRequest { ///

Contains the response to a CreateLayer request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLayerResult { ///

The layer ID.

#[serde(rename = "LayerId")] @@ -690,7 +690,7 @@ pub struct CreateStackRequest { #[serde(rename = "Attributes")] #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<::std::collections::HashMap>, - ///

A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

+ ///

A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

#[serde(rename = "ChefConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub chef_configuration: Option, @@ -698,29 +698,30 @@ pub struct CreateStackRequest { #[serde(rename = "ConfigurationManager")] #[serde(skip_serializing_if = "Option::is_none")] pub configuration_manager: Option, + ///

Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes.

#[serde(rename = "CustomCookbooksSource")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_cookbooks_source: Option, - ///

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

+ ///

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

"{\"key1\": \"value1\", \"key2\": \"value2\",...}"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

#[serde(rename = "CustomJson")] #[serde(skip_serializing_if = "Option::is_none")] pub custom_json: Option, - ///

The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

+ ///

The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

#[serde(rename = "DefaultAvailabilityZone")] #[serde(skip_serializing_if = "Option::is_none")] pub default_availability_zone: Option, - ///

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

+ ///

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

#[serde(rename = "DefaultInstanceProfileArn")] pub default_instance_profile_arn: String, - ///

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

+ ///

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

#[serde(rename = "DefaultOs")] #[serde(skip_serializing_if = "Option::is_none")] pub default_os: Option, - ///

The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device.

+ ///

The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device.

#[serde(rename = "DefaultRootDeviceType")] #[serde(skip_serializing_if = "Option::is_none")] pub default_root_device_type: Option, - ///

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

+ ///

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

#[serde(rename = "DefaultSshKeyName")] #[serde(skip_serializing_if = "Option::is_none")] pub default_ssh_key_name: Option, @@ -728,28 +729,28 @@ pub struct CreateStackRequest { #[serde(rename = "DefaultSubnetId")] #[serde(skip_serializing_if = "Option::is_none")] pub default_subnet_id: Option, - ///

The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

  • Baked_Goods

  • Clouds

  • Europe_Cities

  • Fruits

  • Greek_Deities

  • Legendary_creatures_from_Japan

  • Planets_and_Moons

  • Roman_Deities

  • Scottish_Islands

  • US_Cities

  • Wild_Cats

To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

+ ///

The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

///   • Baked_Goods
///   • Clouds
///   • Europe_Cities
///   • Fruits
///   • Greek_Deities_and_Titans
///   • Legendary_creatures_from_Japan
///   • Planets_and_Moons
///   • Roman_Deities
///   • Scottish_Islands
///   • US_Cities
///   • Wild_Cats
///
/// To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.
#[serde(rename = "HostnameTheme")]
#[serde(skip_serializing_if = "Option::is_none")]
pub hostname_theme: Option<String>,
/// The stack name.
#[serde(rename = "Name")]
pub name: String,
/// The stack's AWS region, such as ap-south-1. For more information about Amazon regions, see Regions and Endpoints.
///
/// In the AWS CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the AWS CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the AWS CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage AWS, we recommend that you use regional endpoints for new stacks. The AWS CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic AWS OpsWorks Stacks region.
#[serde(rename = "Region")]
pub region: String,
/// The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "ServiceRoleArn")]
pub service_role_arn: String,
/// Whether the stack uses custom cookbooks.
#[serde(rename = "UseCustomCookbooks")]
#[serde(skip_serializing_if = "Option::is_none")]
pub use_custom_cookbooks: Option<bool>,
/// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.
///
/// AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:
///
///   • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
///
///   • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.
///
/// For more information, see Create a New Stack.
#[serde(rename = "UseOpsworksSecurityGroups")]
#[serde(skip_serializing_if = "Option::is_none")]
pub use_opsworks_security_groups: Option<bool>,
/// The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.
///
///   • If your account supports EC2-Classic, the default value is no VPC.
///
///   • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.
///
/// If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.
///
/// If you specify a nondefault VPC ID, note the following:
///
///   • It must belong to a VPC in your account that is in the specified region.
///
///   • You must specify a value for DefaultSubnetId.
///
/// For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms.
#[serde(rename = "VpcId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub vpc_id: Option<String>,

@@ -757,7 +758,7 @@ pub struct CreateStackRequest {
/// Contains the response to a CreateStack request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateStackResult {
/// The stack ID, which is an opaque string that you use to identify the stack when performing actions such as DescribeStacks.
#[serde(rename = "StackId")]

@@ -767,7 +768,7 @@ pub struct CreateStackResult {
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct CreateUserProfileRequest {
/// Whether users can specify their own SSH public key through the My Settings page. For more information, see Setting an IAM User's Public SSH Key.
#[serde(rename = "AllowSelfManagement")]
#[serde(skip_serializing_if = "Option::is_none")]
pub allow_self_management: Option<bool>,
@@ -786,7 +787,7 @@ pub struct CreateUserProfileRequest {
/// Contains the response to a CreateUserProfile request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateUserProfileResult {
/// The user's IAM ARN.
#[serde(rename = "IamUserArn")]
@@ -856,12 +857,13 @@ pub struct DeleteUserProfileRequest {
/// Describes a deployment of a stack or app.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Deployment {
/// The app ID.
#[serde(rename = "AppId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub app_id: Option<String>,
+ /// Used to specify a stack or deployment command.
#[serde(rename = "Command")]
#[serde(skip_serializing_if = "Option::is_none")]
pub command: Option<DeploymentCommand>,
@@ -877,7 +879,7 @@ pub struct Deployment {
#[serde(rename = "CreatedAt")]
#[serde(skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
/// A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for stack or to pass data to recipes. The string should be in the following format:
///
/// "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
///
/// For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
#[serde(rename = "CustomJson")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_json: Option<String>,

@@ -968,7 +970,7 @@ pub struct DescribeAgentVersionsRequest {
/// Contains the response to a DescribeAgentVersions request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAgentVersionsResult {
/// The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console.
#[serde(rename = "AgentVersions")]
@@ -990,7 +992,7 @@ pub struct DescribeAppsRequest {
/// Contains the response to a DescribeApps request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAppsResult {
/// An array of App objects that describe the specified apps.
#[serde(rename = "Apps")]
@@ -1016,7 +1018,7 @@ pub struct DescribeCommandsRequest {
/// Contains the response to a DescribeCommands request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeCommandsResult {
/// An array of Command objects that describe each of the specified commands.
#[serde(rename = "Commands")]
@@ -1042,7 +1044,7 @@ pub struct DescribeDeploymentsRequest {
/// Contains the response to a DescribeDeployments request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDeploymentsResult {
/// An array of Deployment objects that describe the deployments.
#[serde(rename = "Deployments")]
@@ -1072,7 +1074,7 @@ pub struct DescribeEcsClustersRequest {
/// Contains the response to a DescribeEcsClusters request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeEcsClustersResult {
/// A list of EcsCluster objects containing the cluster descriptions.
#[serde(rename = "EcsClusters")]
@@ -1102,7 +1104,7 @@ pub struct DescribeElasticIpsRequest {
/// Contains the response to a DescribeElasticIps request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeElasticIpsResult {
/// An ElasticIps object that describes the specified Elastic IP addresses.
#[serde(rename = "ElasticIps")]
@@ -1124,7 +1126,7 @@ pub struct DescribeElasticLoadBalancersRequest {
/// Contains the response to a DescribeElasticLoadBalancers request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeElasticLoadBalancersResult {
/// A list of ElasticLoadBalancer objects that describe the specified Elastic Load Balancing instances.
#[serde(rename = "ElasticLoadBalancers")]
@@ -1150,7 +1152,7 @@ pub struct DescribeInstancesRequest {
/// Contains the response to a DescribeInstances request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeInstancesResult {
/// An array of Instance objects that describe the instances.
#[serde(rename = "Instances")]
@@ -1172,7 +1174,7 @@ pub struct DescribeLayersRequest {
/// Contains the response to a DescribeLayers request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeLayersResult {
/// An array of Layer objects that describe the layers.
#[serde(rename = "Layers")]
@@ -1189,7 +1191,7 @@ pub struct DescribeLoadBasedAutoScalingRequest {
/// Contains the response to a DescribeLoadBasedAutoScaling request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeLoadBasedAutoScalingResult {
/// An array of LoadBasedAutoScalingConfiguration objects that describe each layer's configuration.
#[serde(rename = "LoadBasedAutoScalingConfigurations")]
@@ -1199,7 +1201,7 @@ pub struct DescribeLoadBasedAutoScalingResult {
/// Contains the response to a DescribeMyUserProfile request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeMyUserProfileResult {
/// A UserProfile object that describes the user's SSH information.
#[serde(rename = "UserProfile")]
@@ -1209,7 +1211,7 @@ pub struct DescribeMyUserProfileResult {
/// The response to a DescribeOperatingSystems request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeOperatingSystemsResponse {
/// Contains information in response to a DescribeOperatingSystems request.
#[serde(rename = "OperatingSystems")]
@@ -1219,7 +1221,7 @@ pub struct DescribeOperatingSystemsResponse {
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct DescribePermissionsRequest {
/// The user's IAM ARN. This can also be a federated user's ARN. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "IamUserArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub iam_user_arn: Option<String>,
@@ -1231,7 +1233,7 @@ pub struct DescribePermissionsRequest {
/// Contains the response to a DescribePermissions request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribePermissionsResult {
/// An array of Permission objects that describe the stack permissions.
///
///   • If the request object contains only a stack ID, the array contains a Permission object with permissions for each of the stack IAM ARNs.
///
///   • If the request object contains only an IAM ARN, the array contains a Permission object with permissions for each of the user's stack IDs.
///
///   • If the request contains a stack ID and an IAM ARN, the array contains a single Permission object with permissions for the specified stack and IAM ARN.
#[serde(rename = "Permissions")]

@@ -1257,7 +1259,7 @@ pub struct DescribeRaidArraysRequest {
/// Contains the response to a DescribeRaidArrays request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeRaidArraysResult {
/// A RaidArrays object that describes the specified RAID arrays.
#[serde(rename = "RaidArrays")]
@@ -1278,7 +1280,7 @@ pub struct DescribeRdsDbInstancesRequest {
/// Contains the response to a DescribeRdsDbInstances request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeRdsDbInstancesResult {
/// An array of RdsDbInstance objects that describe the instances.
#[serde(rename = "RdsDbInstances")]
@@ -1304,7 +1306,7 @@ pub struct DescribeServiceErrorsRequest {
/// Contains the response to a DescribeServiceErrors request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeServiceErrorsResult {
/// An array of ServiceError objects that describe the specified service errors.
#[serde(rename = "ServiceErrors")]
@@ -1321,7 +1323,7 @@ pub struct DescribeStackProvisioningParametersRequest {
/// Contains the response to a DescribeStackProvisioningParameters request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeStackProvisioningParametersResult {
/// The AWS OpsWorks Stacks agent installer's URL.
#[serde(rename = "AgentInstallerUrl")]
@@ -1342,7 +1344,7 @@ pub struct DescribeStackSummaryRequest {
/// Contains the response to a DescribeStackSummary request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeStackSummaryResult {
/// A StackSummary object that contains the results.
#[serde(rename = "StackSummary")]
@@ -1360,7 +1362,7 @@ pub struct DescribeStacksRequest {
/// Contains the response to a DescribeStacks request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeStacksResult {
/// An array of Stack objects that describe the stacks.
#[serde(rename = "Stacks")]

@@ -1377,7 +1379,7 @@ pub struct DescribeTimeBasedAutoScalingRequest {
/// Contains the response to a DescribeTimeBasedAutoScaling request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeTimeBasedAutoScalingResult {
/// An array of TimeBasedAutoScalingConfiguration objects that describe the configuration for the specified instances.
#[serde(rename = "TimeBasedAutoScalingConfigurations")]
@@ -1395,7 +1397,7 @@ pub struct DescribeUserProfilesRequest {
/// Contains the response to a DescribeUserProfiles request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeUserProfilesResult {
/// A Users object that describes the specified users.
#[serde(rename = "UserProfiles")]
@@ -1425,7 +1427,7 @@ pub struct DescribeVolumesRequest {
/// Contains the response to a DescribeVolumes request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeVolumesResult {
/// An array of volume IDs.
#[serde(rename = "Volumes")]
@@ -1450,14 +1452,14 @@ pub struct DisassociateElasticIpRequest {
pub elastic_ip: String,
}
/// Describes an Amazon EBS volume. This data type maps directly to the Amazon EC2 EbsBlockDevice data type.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct EbsBlockDevice {
/// Whether the volume is deleted on instance termination.
#[serde(rename = "DeleteOnTermination")]
#[serde(skip_serializing_if = "Option::is_none")]
pub delete_on_termination: Option<bool>,
/// The number of I/O operations per second (IOPS) that the volume supports. For more information, see EbsBlockDevice.
#[serde(rename = "Iops")]
#[serde(skip_serializing_if = "Option::is_none")]
pub iops: Option<i64>,
@@ -1465,7 +1467,7 @@ pub struct EbsBlockDevice {
#[serde(rename = "SnapshotId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub snapshot_id: Option<String>,
/// The volume size, in GiB. For more information, see EbsBlockDevice.
#[serde(rename = "VolumeSize")]
#[serde(skip_serializing_if = "Option::is_none")]
pub volume_size: Option<i64>,

@@ -1477,7 +1479,7 @@ pub struct EbsBlockDevice {
/// Describes a registered Amazon ECS cluster.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct EcsCluster {
/// The cluster's ARN.
#[serde(rename = "EcsClusterArn")]
@@ -1499,7 +1501,7 @@ pub struct EcsCluster {
/// Describes an Elastic IP address.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ElasticIp {
/// The domain.
#[serde(rename = "Domain")]
@@ -1517,7 +1519,7 @@ pub struct ElasticIp {
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// The AWS region. For more information, see Regions and Endpoints.
#[serde(rename = "Region")]
#[serde(skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
@@ -1525,7 +1527,7 @@ pub struct ElasticIp {
/// Describes an Elastic Load Balancing instance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ElasticLoadBalancer {
/// A list of Availability Zones.
#[serde(rename = "AvailabilityZones")]
@@ -1589,7 +1591,7 @@ pub struct GetHostnameSuggestionRequest {
/// Contains the response to a GetHostnameSuggestion request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GetHostnameSuggestionResult {
/// The generated host name.
#[serde(rename = "Hostname")]
@@ -1614,7 +1616,7 @@ pub struct GrantAccessRequest {
/// Contains the response to a GrantAccess request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GrantAccessResult {
/// A TemporaryCredential object that contains the data needed to log in to the instance by RDP clients, such as the Microsoft Remote Desktop Connection.
#[serde(rename = "TemporaryCredential")]
@@ -1624,13 +1626,13 @@ pub struct GrantAccessResult {
/// Describes an instance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Instance {
/// The agent version. This parameter is set to INHERIT if the instance inherits the default stack setting or to a version number for a fixed agent version.
#[serde(rename = "AgentVersion")]
#[serde(skip_serializing_if = "Option::is_none")]
pub agent_version: Option<String>,
/// A custom AMI ID to be used to create the instance. For more information, see Instances
#[serde(rename = "AmiId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ami_id: Option<String>,
@@ -1646,7 +1648,7 @@ pub struct Instance {
#[serde(rename = "AutoScalingType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_scaling_type: Option<String>,
/// The instance Availability Zone. For more information, see Regions and Endpoints.
#[serde(rename = "AvailabilityZone")]
#[serde(skip_serializing_if = "Option::is_none")]
pub availability_zone: Option<String>,
@@ -1674,7 +1676,7 @@ pub struct Instance {
#[serde(rename = "EcsContainerInstanceArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ecs_container_instance_arn: Option<String>,
/// The instance Elastic IP address.
#[serde(rename = "ElasticIp")]
#[serde(skip_serializing_if = "Option::is_none")]
pub elastic_ip: Option<String>,
@@ -1694,7 +1696,7 @@ pub struct Instance {
#[serde(rename = "InstanceId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub instance_id: Option<String>,
/// The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "InstanceProfileArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub instance_profile_arn: Option<String>,
@@ -1746,7 +1748,7 @@ pub struct Instance {
#[serde(rename = "ReportedOs")]
#[serde(skip_serializing_if = "Option::is_none")]
pub reported_os: Option<ReportedOs>,
/// The instance's root device type. For more information, see Storage for the Root Device.
#[serde(rename = "RootDeviceType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub root_device_type: Option<String>,
@@ -1792,7 +1794,7 @@ pub struct Instance {
pub virtualization_type: Option<String>,
}
/// Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata service. For more information, see Instance Metadata and User Data.
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct InstanceIdentity {
/// A JSON document that contains the metadata.
@@ -1807,7 +1809,7 @@ pub struct InstanceIdentity {

/// Describes how many instances a stack has for each status.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct InstancesCount {
/// The number of instances in the Assigning state.
#[serde(rename = "Assigning")]
@@ -1893,7 +1895,7 @@ pub struct InstancesCount {
/// Describes a layer.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Layer {
/// The Amazon Resource Name (ARN) of a layer.
#[serde(rename = "Arn")]
@@ -1903,11 +1905,11 @@ pub struct Layer {
#[serde(rename = "Attributes")]
#[serde(skip_serializing_if = "Option::is_none")]
pub attributes: Option<::std::collections::HashMap<String, String>>,
/// Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.
#[serde(rename = "AutoAssignElasticIps")]
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_assign_elastic_ips: Option<bool>,
/// For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.
#[serde(rename = "AutoAssignPublicIps")]
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_assign_public_ips: Option<bool>,
@@ -1919,7 +1921,7 @@ pub struct Layer {
#[serde(rename = "CreatedAt")]
#[serde(skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
/// The ARN of the default IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "CustomInstanceProfileArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_instance_profile_arn: Option<String>,
@@ -1935,6 +1937,7 @@ pub struct Layer {
#[serde(rename = "CustomSecurityGroupIds")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_security_group_ids: Option<Vec<String>>,
+ /// AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. You can also provide custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.
+ ///
+ /// To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.
#[serde(rename = "DefaultRecipes")]
#[serde(skip_serializing_if = "Option::is_none")]
pub default_recipes: Option<Recipes>,
@@ -2014,7 +2017,7 @@ pub struct ListTagsRequest {
/// Contains the response to a ListTags request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListTagsResult {
/// If a paginated request does not return all of the remaining results, this parameter is set to a token that you can assign to the request object's NextToken parameter to get the next set of results. If the previous paginated request returned all of the remaining results, this parameter is set to null.
#[serde(rename = "NextToken")]
@@ -2028,7 +2031,7 @@ pub struct ListTagsResult {
/// Describes a layer's load-based auto scaling configuration.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct LoadBasedAutoScalingConfiguration {
/// An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when AWS OpsWorks Stacks reduces the number of instances.
#[serde(rename = "DownScaling")]
@@ -2050,17 +2053,17 @@ pub struct LoadBasedAutoScalingConfiguration {
/// Describes supported operating systems in AWS OpsWorks Stacks.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct OperatingSystem {
/// Supported configuration manager name and versions for an AWS OpsWorks Stacks operating system.
#[serde(rename = "ConfigurationManagers")]
#[serde(skip_serializing_if = "Option::is_none")]
pub configuration_managers: Option<Vec<OperatingSystemConfigurationManager>>,
- /// The ID of a supported operating system, such as Amazon Linux 2017.09.
+ /// The ID of a supported operating system, such as Amazon Linux 2018.03.
#[serde(rename = "Id")]
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
- /// The name of the operating system, such as Amazon Linux 2017.09.
+ /// The name of the operating system, such as Amazon Linux 2018.03.
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
@@ -2084,7 +2087,7 @@ pub struct OperatingSystem {
/// A block that contains information about the configuration manager (Chef) and the versions of the configuration manager that are supported for an operating system.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct OperatingSystemConfigurationManager {
/// The name of the configuration manager, which is Chef.
#[serde(rename = "Name")]
@@ -2098,7 +2101,7 @@ pub struct OperatingSystemConfigurationManager {
/// Describes stack or user permissions.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Permission {
/// Whether the user can use SSH.
#[serde(rename = "AllowSsh")]
@@ -2108,11 +2111,11 @@ pub struct Permission {
#[serde(rename = "AllowSudo")]
#[serde(skip_serializing_if = "Option::is_none")]
pub allow_sudo: Option<bool>,
/// The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "IamUserArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub iam_user_arn: Option<String>,
/// The user's permission level, which must be the following:
///
///   • deny
///
///   • show
///
///   • deploy
///
///   • manage
///
///   • iam_only
///
/// For more information on the permissions associated with these levels, see Managing User Permissions
#[serde(rename = "Level")]
#[serde(skip_serializing_if = "Option::is_none")]
pub level: Option<String>,
@@ -2124,9 +2127,9 @@ pub struct Permission {

/// Describes an instance's RAID array.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RaidArray {
/// The array's Availability Zone. For more information, see Regions and Endpoints.
#[serde(rename = "AvailabilityZone")]
#[serde(skip_serializing_if = "Option::is_none")]
pub availability_zone: Option<String>,
@@ -2182,7 +2185,7 @@ pub struct RaidArray {
/// Describes an Amazon RDS instance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RdsDbInstance {
/// The instance's address.
#[serde(rename = "Address")]
@@ -2266,7 +2269,7 @@ pub struct RegisterEcsClusterRequest {
/// Contains the response to a RegisterEcsCluster request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RegisterEcsClusterResult {
/// The cluster's ARN.
#[serde(rename = "EcsClusterArn")]
@@ -2286,7 +2289,7 @@ pub struct RegisterElasticIpRequest {
/// Contains the response to a RegisterElasticIp request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RegisterElasticIpResult {
/// The Elastic IP address.
#[serde(rename = "ElasticIp")]
@@ -2327,7 +2330,7 @@ pub struct RegisterInstanceRequest {
/// Contains the response to a RegisterInstanceResult request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RegisterInstanceResult {
/// The registered instance's AWS OpsWorks Stacks ID.
#[serde(rename = "InstanceId")]
@@ -2364,7 +2367,7 @@ pub struct RegisterVolumeRequest {
/// Contains the response to a RegisterVolume request.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RegisterVolumeResult {
/// The volume ID.
#[serde(rename = "VolumeId")]
@@ -2374,7 +2377,7 @@ pub struct RegisterVolumeResult {
/// A registered instance's reported operating system.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ReportedOs {
/// The operating system family.
#[serde(rename = "Family")]
@@ -2392,7 +2395,7 @@ pub struct ReportedOs {
/// Describes a user's SSH information.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct SelfUserProfile {
/// The user's IAM ARN.
#[serde(rename = "IamUserArn")]
@@ -2414,7 +2417,7 @@ pub struct SelfUserProfile {
/// Describes an AWS OpsWorks Stacks service error.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ServiceError {
/// When the error occurred.
#[serde(rename = "CreatedAt")]
@@ -2474,7 +2477,7 @@ pub struct SetPermissionRequest {
/// The user's IAM ARN. This can also be a federated user's ARN.
#[serde(rename = "IamUserArn")]
pub iam_user_arn: String,
/// The user's permission level, which must be set to one of the following strings. You cannot set your own permissions level.
///
///   • deny
///
///   • show
///
///   • deploy
///
///   • manage
///
///   • iam_only
///
/// For more information about the permissions associated with these levels, see Managing User Permissions.
#[serde(rename = "Level")]
#[serde(skip_serializing_if = "Option::is_none")]
pub level: Option<String>,

@@ -2497,7 +2500,7 @@ pub struct SetTimeBasedAutoScalingRequest {
/// The Shutdown event configuration.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ShutdownEventConfiguration {
/// Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining
#[serde(rename = "DelayUntilElbConnectionsDrained")]
#[serde(skip_serializing_if = "Option::is_none")]
pub delay_until_elb_connections_drained: Option<bool>,
@@ -2507,10 +2510,10 @@ pub struct ShutdownEventConfiguration {
pub execution_timeout: Option<i64>,
}
/// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Source {
- /// When included in a request, the parameter depends on the repository type.
- ///
- ///   • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
- ///
- ///   • For HTTP bundles and Subversion repositories, set Password to the password.
- ///
- /// For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.
- ///
- /// In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.
+ /// When included in a request, the parameter depends on the repository type.
+ ///
+ ///   • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
+ ///
+ ///   • For HTTP bundles and Subversion repositories, set Password to the password.
+ ///
+ /// For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.
+ ///
+ /// In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.
#[serde(rename = "Password")]
#[serde(skip_serializing_if = "Option::is_none")]
pub password: Option<String>,

@@ -2553,7 +2556,7 @@ pub struct SslConfiguration {
/// Describes a stack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Stack {
/// The agent version. This parameter is set to LATEST for auto-update, or a version number for a fixed agent version.
#[serde(rename = "AgentVersion")]
@@ -2567,7 +2570,7 @@ pub struct Stack {
#[serde(rename = "Attributes")]
#[serde(skip_serializing_if = "Option::is_none")]
pub attributes: Option<::std::collections::HashMap<String, String>>,
/// A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version. For more information, see Create a New Stack.
#[serde(rename = "ChefConfiguration")]
#[serde(skip_serializing_if = "Option::is_none")]
pub chef_configuration: Option<ChefConfiguration>,
@@ -2579,18 +2582,19 @@ pub struct Stack {
#[serde(rename = "CreatedAt")]
#[serde(skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
+ /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes.
#[serde(rename = "CustomCookbooksSource")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_cookbooks_source: Option<Source>,
/// A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:
///
/// "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
///
/// For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
#[serde(rename = "CustomJson")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_json: Option<String>,
/// The stack's default Availability Zone. For more information, see Regions and Endpoints.
#[serde(rename = "DefaultAvailabilityZone")]
#[serde(skip_serializing_if = "Option::is_none")]
pub default_availability_zone: Option<String>,
/// The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "DefaultInstanceProfileArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub default_instance_profile_arn: Option<String>,
@@ -2598,7 +2602,7 @@ pub struct Stack {
#[serde(rename = "DefaultOs")]
#[serde(skip_serializing_if = "Option::is_none")]
pub default_os: Option<String>,
/// The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.
#[serde(rename = "DefaultRootDeviceType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub default_root_device_type: Option<String>,
@@ -2618,7 +2622,7 @@ pub struct Stack {
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// The stack AWS region, such as "ap-northeast-2". For more information about AWS regions, see Regions and Endpoints.
#[serde(rename = "Region")]
#[serde(skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
@@ -2659,7 +2663,7 @@ pub struct StackConfigurationManager {
/// Summarizes the number of layers, instances, and apps in a stack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StackSummary {
/// The number of apps.
#[serde(rename = "AppsCount")]
@@ -2703,7 +2707,7 @@ pub struct StartStackRequest {
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct StopInstanceRequest {
- /// Specifies whether to force an instance to stop.
+ /// Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call disassociates the AWS OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the AWS OpsWorks Stacks instance with a new one.
#[serde(rename = "Force")]
#[serde(skip_serializing_if = "Option::is_none")]
pub force: Option<bool>,

@@ -2731,7 +2735,7 @@ pub struct TagResourceRequest {
/// Contains the data needed by RDP clients such as the Microsoft Remote Desktop Connection to log in to the instance.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct TemporaryCredential {
/// The instance's AWS OpsWorks Stacks ID.
#[serde(rename = "InstanceId")]
@@ -2753,7 +2757,7 @@ pub struct TemporaryCredential {
/// Describes an instance's time-based auto scaling configuration.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct TimeBasedAutoScalingConfiguration {
/// A WeeklyAutoScalingSchedule object with the instance schedule.
#[serde(rename = "AutoScalingSchedule")]
@@ -2818,7 +2822,7 @@ pub struct UpdateAppRequest {
#[serde(rename = "EnableSsl")]
#[serde(skip_serializing_if = "Option::is_none")]
pub enable_ssl: Option<bool>,
- /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.
- ///
- /// There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 10KB)."
- ///
- /// This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version.
+ /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.
+ ///
+ /// There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 20 KB)."
+ ///
+ /// If you have specified one or more environment variables, you cannot modify the stack's Chef version.
#[serde(rename = "Environment")]
#[serde(skip_serializing_if = "Option::is_none")]
pub environment: Option<Vec<EnvironmentVariable>>,

@@ -2857,7 +2861,7 @@ pub struct UpdateInstanceRequest {
#[serde(rename = "AmiId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ami_id: Option<String>,
/// The instance architecture. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.
#[serde(rename = "Architecture")]
#[serde(skip_serializing_if = "Option::is_none")]
pub architecture: Option<String>,
@@ -2880,7 +2884,7 @@ pub struct UpdateInstanceRequest {
/// The instance ID.
#[serde(rename = "InstanceId")]
pub instance_id: String,
/// The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.
#[serde(rename = "InstanceType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub instance_type: Option<String>,
@@ -2888,7 +2892,7 @@ pub struct UpdateInstanceRequest {
#[serde(rename = "LayerIds")]
#[serde(skip_serializing_if = "Option::is_none")]
pub layer_ids: Option<Vec<String>>,
- /// The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.
- ///
- ///   • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
- ///
- ///   • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
- ///
- ///   • CentOS Linux 7
- ///
- ///   • Red Hat Enterprise Linux 7
- ///
- ///   • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
- ///
- /// For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.
- ///
- /// The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.
- ///
- /// You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.
+ /// The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.
+ ///
+ ///   • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
+ ///
+ ///   • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
+ ///
+ ///   • CentOS Linux 7
+ ///
+ ///   • Red Hat Enterprise Linux 7
+ ///
+ ///   • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
+ ///
+ /// For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.
+ ///
+ /// The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.
+ ///
+ /// You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.
#[serde(rename = "Os")]
#[serde(skip_serializing_if = "Option::is_none")]
pub os: Option<String>,

@@ -2904,11 +2908,11 @@ pub struct UpdateLayerRequest {
#[serde(rename = "Attributes")]
#[serde(skip_serializing_if = "Option::is_none")]
pub attributes: Option<::std::collections::HashMap<String, String>>,
/// Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.
#[serde(rename = "AutoAssignElasticIps")]
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_assign_elastic_ips: Option<bool>,
/// For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.
#[serde(rename = "AutoAssignPublicIps")]
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_assign_public_ips: Option<bool>,
@@ -2916,11 +2920,11 @@ pub struct UpdateLayerRequest {
#[serde(rename = "CloudWatchLogsConfiguration")]
#[serde(skip_serializing_if = "Option::is_none")]
pub cloud_watch_logs_configuration: Option<CloudWatchLogsConfiguration>,
/// The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.
#[serde(rename = "CustomInstanceProfileArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_instance_profile_arn: Option<String>,
/// A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON.
#[serde(rename = "CustomJson")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_json: Option<String>,
@@ -2955,7 +2959,7 @@ pub struct UpdateLayerRequest {
#[serde(rename = "Packages")]
#[serde(skip_serializing_if = "Option::is_none")]
pub packages: Option<Vec<String>>,
/// For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\A[a-z0-9\-\_\.]+\Z/.
///
/// The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference
#[serde(rename = "Shortname")]
#[serde(skip_serializing_if = "Option::is_none")]
pub shortname: Option<String>,
@@ -3002,7 +3006,7 @@ pub struct UpdateStackRequest {
#[serde(rename = "Attributes")]
#[serde(skip_serializing_if = "Option::is_none")]
pub attributes: Option<::std::collections::HashMap<String, String>>,
/// A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.
#[serde(rename = "ChefConfiguration")]
#[serde(skip_serializing_if = "Option::is_none")]
pub chef_configuration: Option<ChefConfiguration>,
@@ -3010,30 +3014,31 @@ pub struct UpdateStackRequest {
#[serde(rename = "ConfigurationManager")]
#[serde(skip_serializing_if = "Option::is_none")]
pub configuration_manager: Option<StackConfigurationManager>,
+ /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes.
#[serde(rename = "CustomCookbooksSource")]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom_cookbooks_source: Option<Source>,

-    /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format:
-    /// "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
-    /// For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
+    /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format:
+    /// "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
+    /// For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
     #[serde(rename = "CustomJson")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub custom_json: Option<String>,

-    /// The stack's default Availability Zone, which must be in the stack's region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see CreateStack.
+    /// The stack's default Availability Zone, which must be in the stack's region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see CreateStack.
     #[serde(rename = "DefaultAvailabilityZone")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_availability_zone: Option<String>,
-    /// The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.
+    /// The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.
     #[serde(rename = "DefaultInstanceProfileArn")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_instance_profile_arn: Option<String>,

-    /// The stack's operating system, which must be set to one of the following:
-    /// • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
-    /// • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
-    /// • CentOS Linux 7
-    /// • Red Hat Enterprise Linux 7
-    /// • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
-    /// • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.
-    /// The default option is the stack's current operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.
+    /// The stack's operating system, which must be set to one of the following:
+    /// • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
+    /// • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
+    /// • CentOS Linux 7
+    /// • Red Hat Enterprise Linux 7
+    /// • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
+    /// • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.
+    /// The default option is the stack's current operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.
     #[serde(rename = "DefaultOs")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_os: Option<String>,

-    /// The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.
+    /// The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.
     #[serde(rename = "DefaultRootDeviceType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_root_device_type: Option<String>,
-    /// A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.
+    /// A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.
     #[serde(rename = "DefaultSshKeyName")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_ssh_key_name: Option<String>,
@@ -3041,7 +3046,7 @@ pub struct UpdateStackRequest {
     #[serde(rename = "DefaultSubnetId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_subnet_id: Option<String>,

-    /// The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:
-    /// • Baked_Goods
-    /// • Clouds
-    /// • Europe_Cities
-    /// • Fruits
-    /// • Greek_Deities
-    /// • Legendary_creatures_from_Japan
-    /// • Planets_and_Moons
-    /// • Roman_Deities
-    /// • Scottish_Islands
-    /// • US_Cities
-    /// • Wild_Cats
-    /// To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.
+    /// The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:
+    /// • Baked_Goods
+    /// • Clouds
+    /// • Europe_Cities
+    /// • Fruits
+    /// • Greek_Deities_and_Titans
+    /// • Legendary_creatures_from_Japan
+    /// • Planets_and_Moons
+    /// • Roman_Deities
+    /// • Scottish_Islands
+    /// • US_Cities
+    /// • Wild_Cats
+    /// To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.
     #[serde(rename = "HostnameTheme")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub hostname_theme: Option<String>,
@@ -3060,7 +3065,7 @@ pub struct UpdateStackRequest {
     #[serde(rename = "UseCustomCookbooks")]
     #[serde(skip_serializing_if = "Option::is_none")]
    pub use_custom_cookbooks: Option<bool>,

-    /// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.
-    /// AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:
-    /// • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
-    /// • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation. Custom security groups are required only for those layers that need custom settings.
-    /// For more information, see Create a New Stack.
+    /// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.
+    /// AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:
+    /// • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
+    /// • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation. Custom security groups are required only for those layers that need custom settings.
+    /// For more information, see Create a New Stack.
     #[serde(rename = "UseOpsworksSecurityGroups")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub use_opsworks_security_groups: Option<bool>,
@@ -3068,7 +3073,7 @@ pub struct UpdateStackRequest {
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateUserProfileRequest {

-    /// Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.
+    /// Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.
     #[serde(rename = "AllowSelfManagement")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub allow_self_management: Option<bool>,
@@ -3102,9 +3107,9 @@ pub struct UpdateVolumeRequest {
 /// Describes a user's SSH information.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UserProfile {
-    /// Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.
+    /// Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.
     #[serde(rename = "AllowSelfManagement")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub allow_self_management: Option<bool>,
@@ -3128,9 +3133,9 @@ pub struct UserProfile {

 /// Describes an instance's Amazon EBS volume.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Volume {
-    /// The volume Availability Zone. For more information, see Regions and Endpoints.
+    /// The volume Availability Zone. For more information, see Regions and Endpoints.
     #[serde(rename = "AvailabilityZone")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub availability_zone: Option<String>,
@@ -3142,7 +3147,7 @@ pub struct Volume {
     #[serde(rename = "Ec2VolumeId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ec_2_volume_id: Option<String>,

-    /// Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.
+    /// Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.
     #[serde(rename = "Encrypted")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub encrypted: Option<bool>,
@@ -3166,7 +3171,7 @@ pub struct Volume {
     #[serde(rename = "RaidArrayId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub raid_array_id: Option<String>,
-    /// The AWS region. For more information about AWS regions, see Regions and Endpoints.
+    /// The AWS region. For more information about AWS regions, see Regions and Endpoints.
     #[serde(rename = "Region")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub region: Option<String>,
@@ -3174,7 +3179,7 @@ pub struct Volume {
     #[serde(rename = "Size")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub size: Option<i64>,
-    /// The value returned by DescribeVolumes.
+    /// The value returned by DescribeVolumes.
     #[serde(rename = "Status")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub status: Option<String>,
@@ -3182,7 +3187,7 @@ pub struct Volume {
     #[serde(rename = "VolumeId")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub volume_id: Option<String>,

-    /// The volume type. For more information, see Amazon EBS Volume Types.
-    /// • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
-    /// • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
-    /// • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
-    /// • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
-    /// • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
+    /// The volume type. For more information, see Amazon EBS Volume Types.
+    /// • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
+    /// • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
+    /// • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
+    /// • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
+    /// • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
     #[serde(rename = "VolumeType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub volume_type: Option<String>,
@@ -3191,7 +3196,7 @@ pub struct Volume {

 /// Describes an Amazon EBS volume configuration.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct VolumeConfiguration {
-    /// Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.
+    /// Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.
     #[serde(rename = "Encrypted")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub encrypted: Option<bool>,
@@ -3212,7 +3217,7 @@ pub struct VolumeConfiguration {

     /// The volume size.
     #[serde(rename = "Size")]
     pub size: i64,
-    /// The volume type. For more information, see Amazon EBS Volume Types.
-    /// • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
-    /// • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
-    /// • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
-    /// • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
-    /// • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
+    /// The volume type. For more information, see Amazon EBS Volume Types.
+    /// • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
+    /// • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
+    /// • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
+    /// • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
+    /// • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
     #[serde(rename = "VolumeType")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub volume_type: Option<String>,
@@ -5709,112 +5714,112 @@ impl Error for UpdateVolumeError {
 }

 /// Trait representing the capabilities of the AWS OpsWorks API. AWS OpsWorks clients implement this trait.
 pub trait OpsWorks {

-    /// Assign a registered instance to a layer.
-    /// • You can assign registered on-premises instances to any layer type.
-    /// • You can assign registered Amazon EC2 instances only to custom layers.
-    /// • You cannot use this action with instances that were created with AWS OpsWorks Stacks.
-    /// Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Assign a registered instance to a layer.
+    /// • You can assign registered on-premises instances to any layer type.
+    /// • You can assign registered Amazon EC2 instances only to custom layers.
+    /// • You cannot use this action with instances that were created with AWS OpsWorks Stacks.
+    /// Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn assign_instance(
         &self,
         input: AssignInstanceRequest,
     ) -> RusotoFuture<(), AssignInstanceError>;
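A minimal sketch of how these trait methods are called in practice through `OpsWorksClient`, using `AssignInstance` as the example (the instance and layer IDs are placeholders; `.sync()` blocks on the returned `RusotoFuture`):

```rust
use rusoto_core::Region;
use rusoto_opsworks::{AssignInstanceRequest, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    let request = AssignInstanceRequest {
        instance_id: "22222222-2222-2222-2222-222222222222".to_string(), // placeholder
        // Registered EC2 instances may only be assigned to custom layers.
        layer_ids: vec!["33333333-3333-3333-3333-333333333333".to_string()], // placeholder
    };
    match client.assign_instance(request).sync() {
        Ok(()) => println!("instance assigned"),
        Err(e) => eprintln!("AssignInstance failed: {}", e),
    }
}
```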

-    /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn assign_volume(&self, input: AssignVolumeRequest) -> RusotoFuture<(), AssignVolumeError>;
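The RegisterVolume → UpdateVolume → AssignVolume ordering described above, as a sketch under the same client conventions (the EBS volume, stack, and instance IDs are placeholders; errors are propagated with `?`):

```rust
use rusoto_core::Region;
use rusoto_opsworks::{
    AssignVolumeRequest, OpsWorks, OpsWorksClient, RegisterVolumeRequest, UpdateVolumeRequest,
};

fn attach_data_volume() -> Result<(), Box<dyn std::error::Error>> {
    let client = OpsWorksClient::new(Region::UsEast1);
    // 1. Register the EBS volume with the stack.
    let registered = client
        .register_volume(RegisterVolumeRequest {
            ec_2_volume_id: Some("vol-0123456789abcdef0".to_string()), // placeholder
            stack_id: "44444444-4444-4444-4444-444444444444".to_string(), // placeholder
        })
        .sync()?;
    let volume_id = registered.volume_id.expect("no volume id returned");
    // 2. A mount point must be set before the volume can be assigned.
    client
        .update_volume(UpdateVolumeRequest {
            volume_id: volume_id.clone(),
            mount_point: Some("/mnt/data".to_string()),
            ..Default::default()
        })
        .sync()?;
    // 3. Assign the registered volume to an instance.
    client
        .assign_volume(AssignVolumeRequest {
            volume_id,
            instance_id: Some("55555555-5555-5555-5555-555555555555".to_string()), // placeholder
        })
        .sync()?;
    Ok(())
}
```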

-    /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn associate_elastic_ip(
         &self,
         input: AssociateElasticIpRequest,
     ) -> RusotoFuture<(), AssociateElasticIpError>;
-    /// Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing.
-    /// You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing.
+    /// You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn attach_elastic_load_balancer(
         &self,
         input: AttachElasticLoadBalancerRequest,
     ) -> RusotoFuture<(), AttachElasticLoadBalancerError>;
-    /// Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.
-    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.
+    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn clone_stack(
         &self,
         input: CloneStackRequest,
     ) -> RusotoFuture<CloneStackResult, CloneStackError>;
-    /// Creates an app for a specified stack. For more information, see Creating Apps.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates an app for a specified stack. For more information, see Creating Apps.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn create_app(&self, input: CreateAppRequest) -> RusotoFuture<CreateAppResult, CreateAppError>;
-    /// Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands.
-    /// Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands.
+    /// Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn create_deployment(
         &self,
         input: CreateDeploymentRequest,
     ) -> RusotoFuture<CreateDeploymentResult, CreateDeploymentError>;
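A sketch of triggering an app deployment through CreateDeployment (the stack and app IDs are placeholders; the "deploy" command name uses the DeploymentCommand shape defined earlier in this file):

```rust
use rusoto_core::Region;
use rusoto_opsworks::{CreateDeploymentRequest, DeploymentCommand, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    let request = CreateDeploymentRequest {
        stack_id: "44444444-4444-4444-4444-444444444444".to_string(), // placeholder
        app_id: Some("66666666-6666-6666-6666-666666666666".to_string()), // placeholder
        command: DeploymentCommand {
            name: "deploy".to_string(),
            ..Default::default()
        },
        ..Default::default()
    };
    match client.create_deployment(request).sync() {
        Ok(result) => println!("deployment id: {:?}", result.deployment_id),
        Err(e) => eprintln!("CreateDeployment failed: {}", e),
    }
}
```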

-    /// Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn create_instance(
         &self,
         input: CreateInstanceRequest,
     ) -> RusotoFuture<CreateInstanceResult, CreateInstanceError>;
-    /// Creates a layer. For more information, see How to Create a Layer.
-    /// You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates a layer. For more information, see How to Create a Layer.
+    /// You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn create_layer(
         &self,
         input: CreateLayerRequest,
     ) -> RusotoFuture<CreateLayerResult, CreateLayerError>;
-    /// Creates a new stack. For more information, see Create a New Stack.
-    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a new stack. For more information, see Create a New Stack.
+    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn create_stack(
         &self,
         input: CreateStackRequest,
     ) -> RusotoFuture<CreateStackResult, CreateStackError>;
-    /// Creates a new user profile.
-    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a new user profile.
+    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn create_user_profile(
         &self,
         input: CreateUserProfileRequest,
     ) -> RusotoFuture<CreateUserProfileResult, CreateUserProfileError>;
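A sketch of creating a user profile for an existing IAM user, with self-management enabled as described by the AllowSelfManagement field above (the ARN is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_opsworks::{CreateUserProfileRequest, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    let request = CreateUserProfileRequest {
        iam_user_arn: "arn:aws:iam::123456789012:user/example".to_string(), // placeholder
        allow_self_management: Some(true),
        ..Default::default()
    };
    match client.create_user_profile(request).sync() {
        Ok(result) => println!("created profile for: {:?}", result.iam_user_arn),
        Err(e) => eprintln!("CreateUserProfile failed: {}", e),
    }
}
```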

-    /// Deletes a specified app.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified app.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn delete_app(&self, input: DeleteAppRequest) -> RusotoFuture<(), DeleteAppError>;
-    /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it.
-    /// For more information, see Deleting Instances.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it.
+    /// For more information, see Deleting Instances.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn delete_instance(
         &self,
         input: DeleteInstanceRequest,
     ) -> RusotoFuture<(), DeleteInstanceError>;
-    /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn delete_layer(&self, input: DeleteLayerRequest) -> RusotoFuture<(), DeleteLayerError>;
-    /// Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn delete_stack(&self, input: DeleteStackRequest) -> RusotoFuture<(), DeleteStackError>;
-    /// Deletes a user profile.
-    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Deletes a user profile.
+    /// Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn delete_user_profile(
         &self,
         input: DeleteUserProfileRequest,
     ) -> RusotoFuture<(), DeleteUserProfileError>;
-    /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.
+    /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.
     fn deregister_ecs_cluster(
         &self,
         input: DeregisterEcsClusterRequest,
     ) -> RusotoFuture<(), DeregisterEcsClusterError>;
-    /// Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn deregister_elastic_ip(
         &self,
         input: DeregisterElasticIpRequest,
     ) -> RusotoFuture<(), DeregisterElasticIpError>;
-    /// Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn deregister_instance(
         &self,
         input: DeregisterInstanceRequest,
     ) -> RusotoFuture<(), DeregisterInstanceError>;
-    /// Deregisters an Amazon RDS instance.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters an Amazon RDS instance.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn deregister_rds_db_instance(
         &self,
         input: DeregisterRdsDbInstanceRequest,
     ) -> RusotoFuture<(), DeregisterRdsDbInstanceError>;
-    /// Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn deregister_volume(
         &self,
         input: DeregisterVolumeRequest,
@@ -5826,61 +5831,61 @@ pub trait OpsWorks {
         input: DescribeAgentVersionsRequest,
     ) -> RusotoFuture<DescribeAgentVersionsResult, DescribeAgentVersionsError>;

-    /// Requests a description of a specified set of apps.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a specified set of apps.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_apps(
         &self,
         input: DescribeAppsRequest,
     ) -> RusotoFuture<DescribeAppsResult, DescribeAppsError>;
-    /// Describes the results of specified commands.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes the results of specified commands.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_commands(
         &self,
         input: DescribeCommandsRequest,
     ) -> RusotoFuture<DescribeCommandsResult, DescribeCommandsError>;
-    /// Requests a description of a specified set of deployments.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a specified set of deployments.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_deployments(
         &self,
         input: DescribeDeploymentsRequest,
     ) -> RusotoFuture<DescribeDeploymentsResult, DescribeDeploymentsError>;
-    /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions.
-    /// This call accepts only one resource-identifying parameter.
+    /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions.
+    /// This call accepts only one resource-identifying parameter.
     fn describe_ecs_clusters(
         &self,
         input: DescribeEcsClustersRequest,
     ) -> RusotoFuture<DescribeEcsClustersResult, DescribeEcsClustersError>;
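The MaxResults/NextToken pagination mentioned above, as a sketch that drains all pages (the stack ID is a placeholder; in practice a single page suffices here, since at most one cluster can be registered):

```rust
use rusoto_core::Region;
use rusoto_opsworks::{DescribeEcsClustersRequest, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeEcsClustersRequest {
            stack_id: Some("44444444-4444-4444-4444-444444444444".to_string()), // placeholder
            max_results: Some(1),
            next_token: next_token.clone(),
            ..Default::default()
        };
        let page = client
            .describe_ecs_clusters(request)
            .sync()
            .expect("DescribeEcsClusters failed");
        for cluster in page.ecs_clusters.unwrap_or_default() {
            println!("cluster: {:?}", cluster.ecs_cluster_arn);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```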

-    /// Describes Elastic IP addresses.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes Elastic IP addresses.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_elastic_ips(
         &self,
         input: DescribeElasticIpsRequest,
     ) -> RusotoFuture<DescribeElasticIpsResult, DescribeElasticIpsError>;
-    /// Describes a stack's Elastic Load Balancing instances.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes a stack's Elastic Load Balancing instances.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_elastic_load_balancers(
         &self,
         input: DescribeElasticLoadBalancersRequest,
     ) -> RusotoFuture<DescribeElasticLoadBalancersResult, DescribeElasticLoadBalancersError>;
-    /// Requests a description of a set of instances.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a set of instances.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_instances(
         &self,
         input: DescribeInstancesRequest,
     ) -> RusotoFuture<DescribeInstancesResult, DescribeInstancesError>;
-    /// Requests a description of one or more layers in a specified stack.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of one or more layers in a specified stack.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_layers(
         &self,
         input: DescribeLayersRequest,
     ) -> RusotoFuture<DescribeLayersResult, DescribeLayersError>;
-    /// Describes load-based auto scaling configurations for specified layers.
-    /// You must specify at least one of the parameters.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes load-based auto scaling configurations for specified layers.
+    /// You must specify at least one of the parameters.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_load_based_auto_scaling(
         &self,
         input: DescribeLoadBasedAutoScalingRequest,
     ) -> RusotoFuture<DescribeLoadBasedAutoScalingResult, DescribeLoadBasedAutoScalingError>;
-    /// Describes a user's SSH information.
-    /// Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes a user's SSH information.
+    /// Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_my_user_profile(
         &self,
     ) -> RusotoFuture<DescribeMyUserProfileResult, DescribeMyUserProfileError>;
@@ -5890,31 +5895,31 @@ pub trait OpsWorks {
         &self,
     ) -> RusotoFuture<DescribeOperatingSystemsResponse, DescribeOperatingSystemsError>;
-    /// Describes the permissions for a specified stack.
-    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Describes the permissions for a specified stack.
+    /// Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
     fn describe_permissions(
         &self,
         input: DescribePermissionsRequest,
     ) -> RusotoFuture<DescribePermissionsResult, DescribePermissionsError>;
-    /// Describe an instance's RAID arrays.
-    /// This call accepts only one resource-identifying parameter.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describe an instance's RAID arrays.
+    /// This call accepts only one resource-identifying parameter.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_raid_arrays(
         &self,
         input: DescribeRaidArraysRequest,
     ) -> RusotoFuture<DescribeRaidArraysResult, DescribeRaidArraysError>;
-    /// Describes Amazon RDS instances.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
-    /// This call accepts only one resource-identifying parameter.
+    /// Describes Amazon RDS instances.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// This call accepts only one resource-identifying parameter.
     fn describe_rds_db_instances(
         &self,
         input: DescribeRdsDbInstancesRequest,
     ) -> RusotoFuture<DescribeRdsDbInstancesResult, DescribeRdsDbInstancesError>;
-    /// Describes AWS OpsWorks Stacks service errors.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
-    /// This call accepts only one resource-identifying parameter.
+    /// Describes AWS OpsWorks Stacks service errors.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// This call accepts only one resource-identifying parameter.
     fn describe_service_errors(
         &self,
         input: DescribeServiceErrorsRequest,
     ) -> RusotoFuture<DescribeServiceErrorsResult, DescribeServiceErrorsError>;
-    /// Requests a description of a stack's provisioning parameters.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a stack's provisioning parameters.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_stack_provisioning_parameters(
         &self,
         input: DescribeStackProvisioningParametersRequest,
@@ -5923,49 +5928,49 @@ pub trait OpsWorks {
         DescribeStackProvisioningParametersError,
     >;
-    /// Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_stack_summary(
         &self,
         input: DescribeStackSummaryRequest,
     ) -> RusotoFuture<DescribeStackSummaryResult, DescribeStackSummaryError>;
-    /// Requests a description of one or more stacks.
-    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of one or more stacks.
+    /// Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
     fn describe_stacks(
         &self,
         input: DescribeStacksRequest,
     ) -> RusotoFuture<DescribeStacksResult, DescribeStacksError>;
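A sketch of listing stack names through DescribeStacks; an empty (default) request describes every stack the caller's credentials can see:

```rust
use rusoto_core::Region;
use rusoto_opsworks::{DescribeStacksRequest, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    match client.describe_stacks(DescribeStacksRequest::default()).sync() {
        Ok(result) => {
            for stack in result.stacks.unwrap_or_default() {
                println!("{:?} ({:?})", stack.name, stack.stack_id);
            }
        }
        Err(e) => eprintln!("DescribeStacks failed: {}", e),
    }
}
```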

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

+ ///

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

fn describe_time_based_auto_scaling( &self, input: DescribeTimeBasedAutoScalingRequest, ) -> RusotoFuture; - ///

Describe specified users.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

+ ///

Describe specified users.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

fn describe_user_profiles( &self, input: DescribeUserProfilesRequest, ) -> RusotoFuture; - ///

Describes an instance's Amazon EBS volumes.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

+ ///

Describes an instance's Amazon EBS volumes.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

fn describe_volumes( &self, input: DescribeVolumesRequest, ) -> RusotoFuture; - ///

Detaches a specified Elastic Load Balancing instance from its layer.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

+ ///

Detaches a specified Elastic Load Balancing instance from its layer.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

fn detach_elastic_load_balancer( &self, input: DetachElasticLoadBalancerRequest, ) -> RusotoFuture<(), DetachElasticLoadBalancerError>; - ///

Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more information, see Resource Management.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

+ ///

Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more information, see Resource Management.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

fn disassociate_elastic_ip( &self, input: DisassociateElasticIpRequest, ) -> RusotoFuture<(), DisassociateElasticIpError>; - ///

Gets a generated host name for the specified layer, based on the current host name theme.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

+ ///

Gets a generated host name for the specified layer, based on the current host name theme.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

fn get_hostname_suggestion( &self, input: GetHostnameSuggestionRequest, @@ -5980,79 +5985,79 @@ pub trait OpsWorks { ///

Returns a list of tags that are applied to the specified stack or layer.

    fn list_tags(&self, input: ListTagsRequest) -> RusotoFuture<ListTagsResult, ListTagsError>;

-    /// Reboots a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Reboots a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn reboot_instance(
        &self,
        input: RebootInstanceRequest,
    ) -> RusotoFuture<(), RebootInstanceError>;

-    /// Registers a specified Amazon ECS cluster with a stack. You can register only one cluster with a stack. A cluster can be registered with only one stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Registers a specified Amazon ECS cluster with a stack. You can register only one cluster with a stack. A cluster can be registered with only one stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_ecs_cluster(
        &self,
        input: RegisterEcsClusterRequest,
    ) -> RusotoFuture<RegisterEcsClusterResult, RegisterEcsClusterError>;

-    /// Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_elastic_ip(
        &self,
        input: RegisterElasticIpRequest,
    ) -> RusotoFuture<RegisterElasticIpResult, RegisterElasticIpError>;

-    /// Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack. We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stacks Stack. Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack. We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stacks Stack. Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_instance(
        &self,
        input: RegisterInstanceRequest,
    ) -> RusotoFuture<RegisterInstanceResult, RegisterInstanceError>;

-    /// Registers an Amazon RDS instance with a stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Registers an Amazon RDS instance with a stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_rds_db_instance(
        &self,
        input: RegisterRdsDbInstanceRequest,
    ) -> RusotoFuture<(), RegisterRdsDbInstanceError>;

-    /// Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by calling DeregisterVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by calling DeregisterVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_volume(
        &self,
        input: RegisterVolumeRequest,
    ) -> RusotoFuture<RegisterVolumeResult, RegisterVolumeError>;

-    /// Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances. To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances. To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn set_load_based_auto_scaling(
        &self,
        input: SetLoadBasedAutoScalingRequest,
    ) -> RusotoFuture<(), SetLoadBasedAutoScalingError>;
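A note for readers of this generated trait: a load-based scaling configuration is built from a pair of threshold values, one for scaling up and one for scaling down. Below is a minimal sketch, assuming the rusoto 0.40-era blocking `.sync()` API and that the field names on the generated structs match the AWS API shapes (`layer_id`, `enable`, `up_scaling`, `down_scaling`, `cpu_threshold`, `instance_count`); the layer ID is a placeholder.

```rust
use rusoto_core::Region;
use rusoto_opsworks::{
    AutoScalingThresholds, OpsWorks, OpsWorksClient, SetLoadBasedAutoScalingRequest,
};

fn enable_load_based_scaling(layer_id: &str) {
    let client = OpsWorksClient::new(Region::UsEast1);

    // Scale up by one instance when average CPU exceeds 80%.
    let mut up = AutoScalingThresholds::default();
    up.cpu_threshold = Some(80.0);
    up.instance_count = Some(1);

    // Scale down by one instance when average CPU drops below 30%.
    let mut down = AutoScalingThresholds::default();
    down.cpu_threshold = Some(30.0);
    down.instance_count = Some(1);

    let mut req = SetLoadBasedAutoScalingRequest::default();
    req.layer_id = layer_id.to_owned();
    req.enable = Some(true);
    req.up_scaling = Some(up);
    req.down_scaling = Some(down);

    // `.sync()` blocks on the returned RusotoFuture.
    client
        .set_load_based_auto_scaling(req)
        .sync()
        .expect("SetLoadBasedAutoScaling failed");
}
```

As the doc comment above warns, this only configures thresholds; the pool of load-based instances must already exist for scaling to have anything to act on.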
-    /// Specifies a user's permissions. For more information, see Security and Permissions. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Specifies a user's permissions. For more information, see Security and Permissions. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn set_permission(&self, input: SetPermissionRequest) -> RusotoFuture<(), SetPermissionError>;

-    /// Specify the time-based auto scaling configuration for a specified instance. For more information, see Managing Load with Time-based and Load-based Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Specify the time-based auto scaling configuration for a specified instance. For more information, see Managing Load with Time-based and Load-based Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn set_time_based_auto_scaling(
        &self,
        input: SetTimeBasedAutoScalingRequest,
    ) -> RusotoFuture<(), SetTimeBasedAutoScalingError>;

-    /// Starts a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Starts a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn start_instance(&self, input: StartInstanceRequest) -> RusotoFuture<(), StartInstanceError>;

-    /// Starts a stack's instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Starts a stack's instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn start_stack(&self, input: StartStackRequest) -> RusotoFuture<(), StartStackError>;

-    /// Stops a specified instance. When you stop a standard instance, the data disappears and must be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without losing data. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Stops a specified instance. When you stop a standard instance, the data disappears and must be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without losing data. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn stop_instance(&self, input: StopInstanceRequest) -> RusotoFuture<(), StopInstanceError>;
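These lifecycle operations all take a request carrying the instance ID and resolve to `()` on success. A minimal sketch of a stop-then-start cycle, assuming the blocking `.sync()` API; the instance ID is a placeholder, and the polling step is only described, not implemented.

```rust
use rusoto_core::Region;
use rusoto_opsworks::{OpsWorks, OpsWorksClient, StartInstanceRequest, StopInstanceRequest};

fn restart_instance(instance_id: &str) {
    let client = OpsWorksClient::new(Region::UsEast1);

    let mut stop = StopInstanceRequest::default();
    stop.instance_id = instance_id.to_owned();
    client.stop_instance(stop).sync().expect("StopInstance failed");

    // In a real program you would poll DescribeInstances until the
    // instance reports "stopped" before starting it again.
    let mut start = StartInstanceRequest::default();
    start.instance_id = instance_id.to_owned();
    client.start_instance(start).sync().expect("StartInstance failed");
}
```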
-    /// Stops a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Stops a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn stop_stack(&self, input: StopStackRequest) -> RusotoFuture<(), StopStackError>;

-    /// Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide.
+    /// Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide.
    fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError>;
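Tags here are a plain key/value string map applied to a stack or layer ARN. A minimal sketch, assuming the blocking `.sync()` API and the generated field names `resource_arn` and `tags`; the ARN and tag values are placeholders.

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_opsworks::{OpsWorks, OpsWorksClient, TagResourceRequest};

fn tag_stack(stack_arn: &str) {
    let client = OpsWorksClient::new(Region::UsEast1);

    let mut tags = HashMap::new();
    tags.insert("team".to_owned(), "platform".to_owned());
    tags.insert("cost-center".to_owned(), "1234".to_owned());

    let mut req = TagResourceRequest::default();
    req.resource_arn = stack_arn.to_owned();
    req.tags = tags;

    client.tag_resource(req).sync().expect("TagResource failed");
}
```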
-    /// Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn unassign_instance(
        &self,
        input: UnassignInstanceRequest,
    ) -> RusotoFuture<(), UnassignInstanceError>;

-    /// Unassigns an assigned Amazon EBS volume. The volume remains registered with the stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Unassigns an assigned Amazon EBS volume. The volume remains registered with the stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn unassign_volume(
        &self,
        input: UnassignVolumeRequest,
@@ -6061,46 +6066,46 @@ pub trait OpsWorks {
    /// Removes tags from a specified stack or layer.
    fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError>;

-    /// Updates a specified app. Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates a specified app. Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_app(&self, input: UpdateAppRequest) -> RusotoFuture<(), UpdateAppError>;

-    /// Updates a registered Elastic IP address's name. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates a registered Elastic IP address's name. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_elastic_ip(
        &self,
        input: UpdateElasticIpRequest,
    ) -> RusotoFuture<(), UpdateElasticIpError>;

-    /// Updates a specified instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates a specified instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_instance(
        &self,
        input: UpdateInstanceRequest,
    ) -> RusotoFuture<(), UpdateInstanceError>;

-    /// Updates a specified layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates a specified layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_layer(&self, input: UpdateLayerRequest) -> RusotoFuture<(), UpdateLayerError>;

-    /// Updates a user's SSH public key. Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Updates a user's SSH public key. Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn update_my_user_profile(
        &self,
        input: UpdateMyUserProfileRequest,
    ) -> RusotoFuture<(), UpdateMyUserProfileError>;

-    /// Updates an Amazon RDS instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates an Amazon RDS instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_rds_db_instance(
        &self,
        input: UpdateRdsDbInstanceRequest,
    ) -> RusotoFuture<(), UpdateRdsDbInstanceError>;

-    /// Updates a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_stack(&self, input: UpdateStackRequest) -> RusotoFuture<(), UpdateStackError>;

-    /// Updates a specified user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Updates a specified user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn update_user_profile(
        &self,
        input: UpdateUserProfileRequest,
    ) -> RusotoFuture<(), UpdateUserProfileError>;

-    /// Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_volume(&self, input: UpdateVolumeRequest) -> RusotoFuture<(), UpdateVolumeError>;
}

/// A client for the AWS OpsWorks API.
@@ -6115,10 +6120,7 @@ impl OpsWorksClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> OpsWorksClient {
-        OpsWorksClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with(
@@ -6132,15 +6134,19 @@ impl OpsWorksClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
-        OpsWorksClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
            region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> OpsWorksClient {
+        OpsWorksClient { client, region }
    }
}
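Both existing constructors now funnel through the new public `new_with_client`, which lets callers build one `Client` and hand it to as many service clients as needed. A minimal sketch of the construction paths, assuming the rusoto 0.40-era re-exports (`HttpClient` from `rusoto_core`, `ProfileProvider` from `rusoto_core::credential`):

```rust
use rusoto_core::credential::ProfileProvider;
use rusoto_core::{Client, HttpClient, Region};
use rusoto_opsworks::OpsWorksClient;

// Shortest path: default credentials provider and TLS client.
fn default_client() -> OpsWorksClient {
    OpsWorksClient::new(Region::UsEast1)
}

// Equivalent construction through the new constructor: build a `Client`
// once, so several service clients can share one dispatcher and one
// credentials provider.
fn shared_client() -> OpsWorksClient {
    let provider = ProfileProvider::new().expect("could not create profile provider");
    let dispatcher = HttpClient::new().expect("could not create request dispatcher");
    let shared = Client::new_with(provider, dispatcher);
    OpsWorksClient::new_with_client(shared, Region::UsEast1)
}
```

The design point of the refactor is that the service client becomes a thin wrapper over an injected `Client`, which also makes it easier to substitute a mock dispatcher in tests.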
impl OpsWorks for OpsWorksClient {
-    /// Assign a registered instance to a layer. • You can assign registered on-premises instances to any layer type. • You can assign registered Amazon EC2 instances only to custom layers. • You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Assign a registered instance to a layer. • You can assign registered on-premises instances to any layer type. • You can assign registered Amazon EC2 instances only to custom layers. • You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn assign_instance(
        &self,
        input: AssignInstanceRequest,
@@ -6166,7 +6172,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn assign_volume(&self, input: AssignVolumeRequest) -> RusotoFuture<(), AssignVolumeError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -6189,7 +6195,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn associate_elastic_ip(
        &self,
        input: AssociateElasticIpRequest,
@@ -6215,7 +6221,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing. You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing. You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn attach_elastic_load_balancer(
        &self,
        input: AttachElasticLoadBalancerRequest,
@@ -6241,7 +6247,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn clone_stack(
        &self,
        input: CloneStackRequest,
@@ -6270,7 +6276,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Creates an app for a specified stack. For more information, see Creating Apps. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates an app for a specified stack. For more information, see Creating Apps. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn create_app(&self, input: CreateAppRequest) -> RusotoFuture<CreateAppResult, CreateAppError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -6295,7 +6301,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands. Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands. Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn create_deployment(
        &self,
        input: CreateDeploymentRequest,
@@ -6324,7 +6330,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
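A deployment is driven by a `DeploymentCommand` embedded in the request. The sketch below deploys an app, assuming the blocking `.sync()` API and the generated field names `stack_id`, `app_id`, `command`, and `name`; the stack and app IDs are placeholders.

```rust
use rusoto_core::Region;
use rusoto_opsworks::{CreateDeploymentRequest, DeploymentCommand, OpsWorks, OpsWorksClient};

fn deploy_app(stack_id: &str, app_id: &str) {
    let client = OpsWorksClient::new(Region::UsEast1);

    // "deploy" is one of the documented DeploymentCommand names.
    let mut command = DeploymentCommand::default();
    command.name = "deploy".to_owned();

    let mut req = CreateDeploymentRequest::default();
    req.stack_id = stack_id.to_owned();
    req.app_id = Some(app_id.to_owned());
    req.command = command;

    let result = client
        .create_deployment(req)
        .sync()
        .expect("CreateDeployment failed");
    println!("started deployment: {:?}", result.deployment_id);
}
```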
-    /// Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn create_instance(
        &self,
        input: CreateInstanceRequest,
@@ -6353,7 +6359,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Creates a layer. For more information, see How to Create a Layer. You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Creates a layer. For more information, see How to Create a Layer. You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn create_layer(
        &self,
        input: CreateLayerRequest,
@@ -6382,7 +6388,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Creates a new stack. For more information, see Create a New Stack. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a new stack. For more information, see Create a New Stack. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn create_stack(
        &self,
        input: CreateStackRequest,
@@ -6411,7 +6417,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Creates a new user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Creates a new user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn create_user_profile(
        &self,
        input: CreateUserProfileRequest,
@@ -6440,7 +6446,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deletes a specified app. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified app. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn delete_app(&self, input: DeleteAppRequest) -> RusotoFuture<(), DeleteAppError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -6463,7 +6469,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it. For more information, see Deleting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it. For more information, see Deleting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn delete_instance(
        &self,
        input: DeleteInstanceRequest,
@@ -6489,7 +6495,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn delete_layer(&self, input: DeleteLayerRequest) -> RusotoFuture<(), DeleteLayerError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -6512,7 +6518,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn delete_stack(&self, input: DeleteStackRequest) -> RusotoFuture<(), DeleteStackError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -6535,7 +6541,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deletes a user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Deletes a user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn delete_user_profile(
        &self,
        input: DeleteUserProfileRequest,
@@ -6561,7 +6567,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.
+    /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.
    fn deregister_ecs_cluster(
        &self,
        input: DeregisterEcsClusterRequest,
@@ -6586,7 +6592,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn deregister_elastic_ip(
        &self,
        input: DeregisterElasticIpRequest,
@@ -6611,7 +6617,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn deregister_instance(
        &self,
        input: DeregisterInstanceRequest,
@@ -6637,7 +6643,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deregisters an Amazon RDS instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters an Amazon RDS instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn deregister_rds_db_instance(
        &self,
        input: DeregisterRdsDbInstanceRequest,
@@ -6660,7 +6666,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn deregister_volume(
        &self,
        input: DeregisterVolumeRequest,
@@ -6714,7 +6720,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of a specified set of apps. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a specified set of apps. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_apps(
        &self,
        input: DescribeAppsRequest,
@@ -6743,7 +6749,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes the results of specified commands. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes the results of specified commands. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_commands(
        &self,
        input: DescribeCommandsRequest,
@@ -6772,7 +6778,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of a specified set of deployments. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a specified set of deployments. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_deployments(
        &self,
        input: DescribeDeploymentsRequest,
@@ -6800,7 +6806,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
+    /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
    fn describe_ecs_clusters(
        &self,
        input: DescribeEcsClustersRequest,
@@ -6828,7 +6834,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
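Since this is one of the few paginated OpsWorks calls, a sketch of driving NextToken by hand may help. It assumes the blocking `.sync()` API and the generated field names `stack_id`, `max_results`, `next_token`, `ecs_clusters`, and `ecs_cluster_arn`; the stack ID is a placeholder.

```rust
use rusoto_core::Region;
use rusoto_opsworks::{DescribeEcsClustersRequest, OpsWorks, OpsWorksClient};

fn list_ecs_clusters(stack_id: &str) {
    let client = OpsWorksClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;

    loop {
        let mut req = DescribeEcsClustersRequest::default();
        req.stack_id = Some(stack_id.to_owned());
        req.max_results = Some(10);
        req.next_token = next_token.clone();

        let page = client
            .describe_ecs_clusters(req)
            .sync()
            .expect("DescribeEcsClusters failed");
        for cluster in page.ecs_clusters.unwrap_or_default() {
            println!("{:?}", cluster.ecs_cluster_arn);
        }

        // Stop when the service no longer returns a continuation token.
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```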
-    /// Describes Elastic IP addresses. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes Elastic IP addresses. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_elastic_ips(
        &self,
        input: DescribeElasticIpsRequest,
@@ -6857,7 +6863,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes a stack's Elastic Load Balancing instances. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes a stack's Elastic Load Balancing instances. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_elastic_load_balancers(
        &self,
        input: DescribeElasticLoadBalancersRequest,
@@ -6886,7 +6892,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of a set of instances. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a set of instances. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_instances(
        &self,
        input: DescribeInstancesRequest,
@@ -6915,7 +6921,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of one or more layers in a specified stack. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of one or more layers in a specified stack. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_layers(
        &self,
        input: DescribeLayersRequest,
@@ -6944,7 +6950,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes load-based auto scaling configurations for specified layers. You must specify at least one of the parameters. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes load-based auto scaling configurations for specified layers. You must specify at least one of the parameters. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_load_based_auto_scaling(
        &self,
        input: DescribeLoadBasedAutoScalingRequest,
@@ -6973,7 +6979,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes a user's SSH information. Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes a user's SSH information. Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_my_user_profile(
        &self,
    ) -> RusotoFuture<DescribeMyUserProfileResult, DescribeMyUserProfileError> {
@@ -7023,7 +7029,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes the permissions for a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
+    /// Describes the permissions for a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn describe_permissions(
        &self,
        input: DescribePermissionsRequest,
@@ -7051,7 +7057,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describe an instance's RAID arrays. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describe an instance's RAID arrays. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_raid_arrays(
        &self,
        input: DescribeRaidArraysRequest,
@@ -7080,7 +7086,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes Amazon RDS instances. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
+    /// Describes Amazon RDS instances. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
    fn describe_rds_db_instances(
        &self,
        input: DescribeRdsDbInstancesRequest,
@@ -7108,7 +7114,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes AWS OpsWorks Stacks service errors. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
+    /// Describes AWS OpsWorks Stacks service errors. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter.
    fn describe_service_errors(
        &self,
        input: DescribeServiceErrorsRequest,
@@ -7136,7 +7142,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of a stack's provisioning parameters. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of a stack's provisioning parameters. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_stack_provisioning_parameters(
        &self,
        input: DescribeStackProvisioningParametersRequest,
@@ -7170,7 +7176,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_stack_summary(
        &self,
        input: DescribeStackSummaryRequest,
@@ -7198,7 +7204,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }

-    /// Requests a description of one or more stacks. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
+    /// Requests a description of one or more stacks. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_stacks(
        &self,
        input: DescribeStacksRequest,
@@ -7227,7 +7233,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
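For completeness, the smallest end-to-end call against this trait: a minimal sketch assuming the blocking `.sync()` API and that `DescribeStacksRequest` derives `Default` like the other generated request structs (leaving `stack_ids` unset returns every stack the credentials can see).

```rust
use rusoto_core::Region;
use rusoto_opsworks::{DescribeStacksRequest, OpsWorks, OpsWorksClient};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);

    let result = client
        .describe_stacks(DescribeStacksRequest::default())
        .sync()
        .expect("DescribeStacks failed");

    for stack in result.stacks.unwrap_or_default() {
        println!("{:?} {:?}", stack.stack_id, stack.name);
    }
}
```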

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

+ ///

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

    fn describe_time_based_auto_scaling(&self, input: DescribeTimeBasedAutoScalingRequest,
@@ -7256,7 +7262,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Describe specified users. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_user_profiles(&self, input: DescribeUserProfilesRequest,
@@ -7284,7 +7290,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Describes an instance's Amazon EBS volumes. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn describe_volumes(&self, input: DescribeVolumesRequest,
@@ -7313,7 +7319,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Detaches a specified Elastic Load Balancing instance from its layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn detach_elastic_load_balancer(&self, input: DetachElasticLoadBalancerRequest,
@@ -7339,7 +7345,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn disassociate_elastic_ip(&self, input: DisassociateElasticIpRequest,
@@ -7364,7 +7370,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Gets a generated host name for the specified layer, based on the current host name theme. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    fn get_hostname_suggestion(&self, input: GetHostnameSuggestionRequest,
@@ -7446,7 +7452,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Reboots a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn reboot_instance(&self, input: RebootInstanceRequest,
@@ -7472,7 +7478,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Registers a specified Amazon ECS cluster with a stack. You can register only one cluster with a stack. A cluster can be registered with only one stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_ecs_cluster(&self, input: RegisterEcsClusterRequest,
@@ -7501,7 +7507,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_elastic_ip(&self, input: RegisterElasticIpRequest,
@@ -7530,7 +7536,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack. We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stacks Stack. Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
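A sketch of the API half of that two-step registration, under the assumption that the OpsWorks agent is already installed on the instance; all identifiers and addresses are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_opsworks::{OpsWorks, OpsWorksClient, RegisterInstanceRequest};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    // RegisterInstance covers only the second step of registration; the
    // complete workflow is usually driven by the AWS CLI register command.
    let request = RegisterInstanceRequest {
        stack_id: "STACK_ID".to_string(), // hypothetical stack ID
        hostname: Some("external-host-1".to_string()),
        public_ip: Some("203.0.113.10".to_string()),
        ..Default::default()
    };
    match client.register_instance(request).sync() {
        Ok(response) => println!("registered instance: {:?}", response.instance_id),
        Err(error) => eprintln!("RegisterInstance failed: {}", error),
    }
}
```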

    fn register_instance(&self, input: RegisterInstanceRequest,
@@ -7559,7 +7565,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Registers an Amazon RDS instance with a stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_rds_db_instance(&self, input: RegisterRdsDbInstanceRequest,
@@ -7584,7 +7590,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by calling DeregisterVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn register_volume(&self, input: RegisterVolumeRequest,
@@ -7613,7 +7619,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances. To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
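A hedged configuration sketch; the `AutoScalingThresholds` field names follow this crate's generated types, and the layer ID and thresholds are illustrative only:

```rust
use rusoto_core::Region;
use rusoto_opsworks::{
    AutoScalingThresholds, OpsWorks, OpsWorksClient, SetLoadBasedAutoScalingRequest,
};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    // Scale the layer up when average CPU exceeds 80% and down below 20%.
    let request = SetLoadBasedAutoScalingRequest {
        layer_id: "LAYER_ID".to_string(), // hypothetical layer ID
        enable: Some(true),
        up_scaling: Some(AutoScalingThresholds {
            instance_count: Some(2),
            cpu_threshold: Some(80.0),
            ..Default::default()
        }),
        down_scaling: Some(AutoScalingThresholds {
            instance_count: Some(1),
            cpu_threshold: Some(20.0),
            ..Default::default()
        }),
    };
    if let Err(error) = client.set_load_based_auto_scaling(request).sync() {
        eprintln!("SetLoadBasedAutoScaling failed: {}", error);
    }
}
```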

    fn set_load_based_auto_scaling(&self, input: SetLoadBasedAutoScalingRequest,
@@ -7636,7 +7642,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Specifies a user's permissions. For more information, see Security and Permissions. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn set_permission(&self, input: SetPermissionRequest) -> RusotoFuture<(), SetPermissionError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7659,7 +7665,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Specify the time-based auto scaling configuration for a specified instance. For more information, see Managing Load with Time-based and Load-based Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn set_time_based_auto_scaling(&self, input: SetTimeBasedAutoScalingRequest,
@@ -7682,7 +7688,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Starts a specified instance. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn start_instance(&self, input: StartInstanceRequest) -> RusotoFuture<(), StartInstanceError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7705,7 +7711,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Starts a stack's instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn start_stack(&self, input: StartStackRequest) -> RusotoFuture<(), StartStackError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7728,7 +7734,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Stops a specified instance. When you stop a standard instance, the data disappears and must be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without losing data. For more information, see Starting, Stopping, and Rebooting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn stop_instance(&self, input: StopInstanceRequest) -> RusotoFuture<(), StopInstanceError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7751,7 +7757,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Stops a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn stop_stack(&self, input: StopStackRequest) -> RusotoFuture<(), StopStackError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7774,7 +7780,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide.
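A tagging sketch with a hypothetical stack ARN; `tags` is a plain `HashMap<String, String>` in the generated request type:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_opsworks::{OpsWorks, OpsWorksClient, TagResourceRequest};

fn main() {
    let client = OpsWorksClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("CostCenter".to_string(), "research".to_string());
    // The ARN identifies the stack or layer being tagged (hypothetical value).
    let request = TagResourceRequest {
        resource_arn: "arn:aws:opsworks:us-east-1:123456789012:stack/STACK_ID/".to_string(),
        tags,
    };
    if let Err(error) = client.tag_resource(request).sync() {
        eprintln!("TagResource failed: {}", error);
    }
}
```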

    fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7797,7 +7803,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn unassign_instance(&self, input: UnassignInstanceRequest,
@@ -7823,7 +7829,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Unassigns an assigned Amazon EBS volume. The volume remains registered with the stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn unassign_volume(&self, input: UnassignVolumeRequest,
@@ -7872,7 +7878,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a specified app. Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_app(&self, input: UpdateAppRequest) -> RusotoFuture<(), UpdateAppError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7895,7 +7901,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a registered Elastic IP address's name. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_elastic_ip(&self, input: UpdateElasticIpRequest,
@@ -7921,7 +7927,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a specified instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    fn update_instance(&self, input: UpdateInstanceRequest,
@@ -7947,7 +7953,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a specified layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_layer(&self, input: UpdateLayerRequest) -> RusotoFuture<(), UpdateLayerError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -7970,7 +7976,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a user's SSH public key. Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn update_my_user_profile(&self, input: UpdateMyUserProfileRequest,
@@ -7995,7 +8001,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates an Amazon RDS instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_rds_db_instance(&self, input: UpdateRdsDbInstanceRequest,
@@ -8020,7 +8026,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a specified stack. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
    fn update_stack(&self, input: UpdateStackRequest) -> RusotoFuture<(), UpdateStackError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
@@ -8043,7 +8049,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates a specified user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.
    fn update_user_profile(&self, input: UpdateUserProfileRequest,
@@ -8069,7 +8075,7 @@ impl OpsWorks for OpsWorksClient {
        })
    }
    /// Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    fn update_volume(&self, input: UpdateVolumeRequest) -> RusotoFuture<(), UpdateVolumeError> {
        let mut request = SignedRequest::new("POST", "opsworks", &self.region, "/");
diff --git a/rusoto/services/opsworks/src/lib.rs b/rusoto/services/opsworks/src/lib.rs
index 3d38add0e08..f2371b0b66f 100644
--- a/rusoto/services/opsworks/src/lib.rs
+++ b/rusoto/services/opsworks/src/lib.rs
@@ -12,7 +12,7 @@
 // =================================================================
 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
//! AWS OpsWorks
//!
//! Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes.
//!
//! AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.
//!
//! SDKs and CLI
//!
//! The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:
//!
//! Endpoints
//!
//! AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created.
//!
//!   • opsworks.us-east-1.amazonaws.com
//!   • opsworks.us-east-2.amazonaws.com
//!   • opsworks.us-west-1.amazonaws.com
//!   • opsworks.us-west-2.amazonaws.com
//!   • opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console)
//!   • opsworks.eu-west-1.amazonaws.com
//!   • opsworks.eu-west-2.amazonaws.com
//!   • opsworks.eu-west-3.amazonaws.com
//!   • opsworks.eu-central-1.amazonaws.com
//!   • opsworks.ap-northeast-1.amazonaws.com
//!   • opsworks.ap-northeast-2.amazonaws.com
//!   • opsworks.ap-south-1.amazonaws.com
//!   • opsworks.ap-southeast-1.amazonaws.com
//!   • opsworks.ap-southeast-2.amazonaws.com
//!   • opsworks.sa-east-1.amazonaws.com
//!
//! Chef Versions
//!
//! When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.
//!
//! You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.
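A sketch tying the endpoint and Chef-version guidance together: choosing the `Region` selects the endpoint, and `ConfigurationManager` pins Chef 12 on a new Linux stack. The ARNs and names are hypothetical, and the field names follow this crate's generated `CreateStackRequest`:

```rust
use rusoto_core::Region;
use rusoto_opsworks::{CreateStackRequest, OpsWorks, OpsWorksClient, StackConfigurationManager};

fn main() {
    // Region::EuWest1 maps to the opsworks.eu-west-1.amazonaws.com endpoint.
    let client = OpsWorksClient::new(Region::EuWest1);
    let request = CreateStackRequest {
        name: "my-stack".to_string(),
        region: "eu-west-1".to_string(),
        service_role_arn: "arn:aws:iam::123456789012:role/aws-opsworks-service-role".to_string(),
        default_instance_profile_arn:
            "arn:aws:iam::123456789012:instance-profile/aws-opsworks-ec2-role".to_string(),
        // Pin the recommended Chef version for Linux stacks.
        configuration_manager: Some(StackConfigurationManager {
            name: Some("Chef".to_string()),
            version: Some("12".to_string()),
        }),
        ..Default::default()
    };
    match client.create_stack(request).sync() {
        Ok(response) => println!("created stack {:?}", response.stack_id),
        Err(error) => eprintln!("CreateStack failed: {}", error),
    }
}
```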

//!
//! If you're using the service, you're probably looking for [OpsWorksClient](struct.OpsWorksClient.html) and [OpsWorks](trait.OpsWorks.html).
diff --git a/rusoto/services/opsworkscm/Cargo.toml b/rusoto/services/opsworkscm/Cargo.toml
index dc1e91af180..36cab98eef9 100644
--- a/rusoto/services/opsworkscm/Cargo.toml
+++ b/rusoto/services/opsworkscm/Cargo.toml
@@ -1,13 +1,13 @@
 [package]
 authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "]
-description = "AWS SDK for Rust - AWS OpsWorks for Chef Automate @ 2016-11-01"
+description = "AWS SDK for Rust - AWS OpsWorks CM @ 2016-11-01"
 documentation = "https://docs.rs/rusoto_opsworkscm"
 keywords = ["AWS", "Amazon", "opsworkscm"]
 license = "MIT"
 name = "rusoto_opsworkscm"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@
 serde_derive = "1.0.2"
 serde_json = "1.0.1"
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/opsworkscm/README.md b/rusoto/services/opsworkscm/README.md
index 24ef63fa2e2..ad974181d20 100644
--- a/rusoto/services/opsworkscm/README.md
+++ b/rusoto/services/opsworkscm/README.md
@@ -1,6 +1,6 @@
 # Rusoto OpsWorksCM
-Rust SDK for AWS OpsWorks for Chef Automate
+Rust SDK for AWS OpsWorks CM
 You may be looking for:
@@ -23,9 +23,16 @@
 ```toml
 [dependencies]
-rusoto_opsworkscm = "0.40.0"
+rusoto_opsworkscm = "0.41.0"
 ```
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
 ## Contributing
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/opsworkscm/src/custom/mod.rs b/rusoto/services/opsworkscm/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/opsworkscm/src/custom/mod.rs
+++ b/rusoto/services/opsworkscm/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/opsworkscm/src/generated.rs b/rusoto/services/opsworkscm/src/generated.rs
index ac85409d837..f0de4d3d927 100644
--- a/rusoto/services/opsworkscm/src/generated.rs
+++ b/rusoto/services/opsworkscm/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
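The new `serialize_structs` feature is what makes the `derive(Serialize)` on the structs below conditional. A minimal sketch of why you might enable it, assuming `rusoto_opsworkscm = { version = "0.41.0", features = ["serialize_structs"] }` and a `serde_json` dependency in your own Cargo.toml:

```rust
use rusoto_core::Region;
use rusoto_opsworkscm::{DescribeServersRequest, OpsWorksCM, OpsWorksCMClient};

fn main() {
    let client = OpsWorksCMClient::new(Region::UsEast1);
    match client.describe_servers(DescribeServersRequest::default()).sync() {
        // With serialize_structs enabled, response structs derive Serialize,
        // so they can be written straight out as JSON.
        Ok(response) => println!("{}", serde_json::to_string_pretty(&response).unwrap()),
        Err(error) => eprintln!("DescribeServers failed: {}", error),
    }
}
```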

/// Stores account attributes.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AccountAttribute {
    /// The maximum allowed value.
    #[serde(rename = "Maximum")]
@@ -56,7 +55,7 @@ pub struct AssociateNodeRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AssociateNodeResponse {
    /// Contains a token which can be passed to the DescribeNodeAssociationStatus API call to get the status of the association request.
    #[serde(rename = "NodeAssociationStatusToken")]
@@ -66,7 +65,7 @@ pub struct AssociateNodeResponse {
/// Describes a single backup.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Backup {
    /// The ARN of the backup.
    #[serde(rename = "BackupArn")]
@@ -170,7 +169,7 @@ pub struct CreateBackupRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateBackupResponse {
    /// Backup created by request.
    #[serde(rename = "Backup")]
@@ -247,7 +246,7 @@ pub struct CreateServerRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateServerResponse {
    /// The server that is created by the request.
    #[serde(rename = "Server")]
@@ -263,7 +262,7 @@ pub struct DeleteBackupRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteBackupResponse {}
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -274,14 +273,14 @@ pub struct DeleteServerRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteServerResponse {}
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct DescribeAccountAttributesRequest {}
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeAccountAttributesResponse {
    /// The attributes that are currently set for the account.
    #[serde(rename = "Attributes")]
@@ -310,7 +309,7 @@ pub struct DescribeBackupsRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeBackupsResponse {
    /// Contains the response to a DescribeBackups request.
    #[serde(rename = "Backups")]
@@ -338,7 +337,7 @@ pub struct DescribeEventsRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeEventsResponse {
    /// NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeEvents again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
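The NextToken contract described above lends itself to a simple fetch loop; a hedged sketch with a hypothetical server name, using this crate's generated request and response types:

```rust
use rusoto_core::Region;
use rusoto_opsworkscm::{DescribeEventsRequest, OpsWorksCM, OpsWorksCMClient};

fn main() {
    let client = OpsWorksCMClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeEventsRequest {
            server_name: "my-chef-server".to_string(), // hypothetical server name
            next_token: next_token.clone(),
            ..Default::default()
        };
        let response = client.describe_events(request).sync().expect("DescribeEvents failed");
        for event in response.server_events.unwrap_or_default() {
            println!("{:?} {:?}", event.created_at, event.message);
        }
        // A null NextToken means all entries have been returned.
        match response.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
}
```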

    #[serde(rename = "NextToken")]
@@ -361,7 +360,7 @@ pub struct DescribeNodeAssociationStatusRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeNodeAssociationStatusResponse {
    /// Attributes specific to the node association. In Puppet, the attribute PUPPET_NODE_CERT contains the signed certificate (the result of the CSR).
    #[serde(rename = "EngineAttributes")]
@@ -390,7 +389,7 @@ pub struct DescribeServersRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeServersResponse {
    /// This is not currently implemented for DescribeServers requests.
    #[serde(rename = "NextToken")]
@@ -417,7 +416,7 @@ pub struct DisassociateNodeRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DisassociateNodeResponse {
    /// Contains a token which can be passed to the DescribeNodeAssociationStatus API call to get the status of the disassociation request.
    #[serde(rename = "NodeAssociationStatusToken")]
@@ -453,7 +452,7 @@ pub struct ExportServerEngineAttributeRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ExportServerEngineAttributeResponse {
    /// The requested engine attribute pair with attribute name and value.
    #[serde(rename = "EngineAttribute")]
@@ -484,12 +483,12 @@ pub struct RestoreServerRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RestoreServerResponse {}
/// Describes a configuration management server.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Server {
    /// Associate a public IP address with a server that you are launching.
    #[serde(rename = "AssociatePublicIpAddress")]
@@ -587,7 +586,7 @@ pub struct Server {
/// An event that is related to the server, such as the start of maintenance or backup.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ServerEvent {
    /// The time when the event occurred.
    #[serde(rename = "CreatedAt")]
@@ -619,7 +618,7 @@ pub struct StartMaintenanceRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartMaintenanceResponse {
    /// Contains the response to a StartMaintenance request.
    #[serde(rename = "Server")]
@@ -642,7 +641,7 @@ pub struct UpdateServerEngineAttributesRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateServerEngineAttributesResponse {
    /// Contains the response to an UpdateServerEngineAttributes request.
    #[serde(rename = "Server")]
@@ -672,7 +671,7 @@ pub struct UpdateServerRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateServerResponse {
    /// Contains the response to an UpdateServer request.

    #[serde(rename = "Server")]
@@ -1421,10 +1420,7 @@ impl OpsWorksCMClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> OpsWorksCMClient {
-        OpsWorksCMClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
     pub fn new_with(
@@ -1438,10 +1434,14 @@ impl OpsWorksCMClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        OpsWorksCMClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> OpsWorksCMClient {
+        OpsWorksCMClient { client, region }
     }
 }
diff --git a/rusoto/services/organizations/Cargo.toml b/rusoto/services/organizations/Cargo.toml
index 6fd284aae22..92631d8c46c 100644
--- a/rusoto/services/organizations/Cargo.toml
+++ b/rusoto/services/organizations/Cargo.toml
@@ -7,7 +7,7 @@
 license = "MIT"
 name = "rusoto_organizations"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@
 serde_derive = "1.0.2"
 serde_json = "1.0.1"
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/organizations/README.md b/rusoto/services/organizations/README.md
index a604327f449..21bd3e8833d 100644
--- a/rusoto/services/organizations/README.md
+++ b/rusoto/services/organizations/README.md
@@ -23,9 +23,16 @@
 ```toml
 [dependencies]
-rusoto_organizations = "0.40.0"
+rusoto_organizations = "0.41.0"
 ```
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
 ## Contributing
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/organizations/src/custom/mod.rs b/rusoto/services/organizations/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/organizations/src/custom/mod.rs
+++ b/rusoto/services/organizations/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/organizations/src/generated.rs b/rusoto/services/organizations/src/generated.rs
index 5df441f36dd..3de4a5e7984 100644
--- a/rusoto/services/organizations/src/generated.rs
+++ b/rusoto/services/organizations/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -32,7 +31,7 @@
 pub struct AcceptHandshakeRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AcceptHandshakeResponse {
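Both constructors above now funnel through the new public `new_with_client`, which also lets callers share one `rusoto_core::Client` (and with it one connection pool and credential chain) across several service clients. A minimal sketch:

```rust
use rusoto_core::{Client, Region};
use rusoto_opsworkscm::OpsWorksCMClient;

fn main() {
    // Client::shared() reuses one dispatcher and credentials provider;
    // new_with_client hands it to each service client without rebuilding it.
    let shared = Client::shared();
    let cm_east = OpsWorksCMClient::new_with_client(shared.clone(), Region::UsEast1);
    let cm_west = OpsWorksCMClient::new_with_client(shared, Region::UsWest2);
    let _ = (cm_east, cm_west);
}
```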

    /// A structure that contains details about the accepted handshake.
    #[serde(rename = "Handshake")]
@@ -42,7 +41,7 @@ pub struct AcceptHandshakeResponse {
/// Contains information about an AWS account that is a member of an organization.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Account {
    /// The Amazon Resource Name (ARN) of the account. For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.
    #[serde(rename = "Arn")]
@@ -79,7 +78,7 @@ pub struct AttachPolicyRequest {
    /// The unique identifier (ID) of the policy that you want to attach to the target. You can get the ID for the policy by calling the ListPolicies operation. The regex pattern for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits.
    #[serde(rename = "PolicyId")]
    pub policy_id: String,
-    /// The unique identifier (ID) of the root, OU, or account that you want to attach the policy to. You can get the ID by calling the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following: • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits. • Account: a string that consists of exactly 12 digits. • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
+    /// The unique identifier (ID) of the root, OU, or account that you want to attach the policy to. You can get the ID by calling the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following: • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits. • Account - A string that consists of exactly 12 digits. • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
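The target ID formats above cover roots, accounts, and OUs; a call sketch using a hypothetical policy ID and a 12-digit account target (both fields appear in the request struct in this diff):

```rust
use rusoto_core::Region;
use rusoto_organizations::{AttachPolicyRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = AttachPolicyRequest {
        policy_id: "p-examplepolicyid".to_string(), // "p-" + 8..128 lower-case letters/digits
        target_id: "123456789012".to_string(),      // an account: exactly 12 digits
    };
    if let Err(error) = client.attach_policy(request).sync() {
        eprintln!("AttachPolicy failed: {}", error);
    }
}
```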

    #[serde(rename = "TargetId")]
    pub target_id: String,
 }
@@ -92,7 +91,7 @@ pub struct CancelHandshakeRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CancelHandshakeResponse {
    /// A structure that contains details about the handshake that you canceled.
    #[serde(rename = "Handshake")]
@@ -102,7 +101,7 @@ pub struct CancelHandshakeResponse {
/// Contains a list of child entities, either OUs or accounts.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Child {
    /// The unique identifier (ID) of this child entity. The regex pattern for a child ID string requires one of the following: • Account: a string that consists of exactly 12 digits. • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
    #[serde(rename = "Id")]
@@ -133,7 +132,7 @@ pub struct CreateAccountRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateAccountResponse {
    /// A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the AWS CloudTrail log for the CreateAccountResult event. For more information, see Monitoring the Activity in Your Organization in the AWS Organizations User Guide.
    #[serde(rename = "CreateAccountStatus")]
@@ -143,7 +142,7 @@ pub struct CreateAccountResponse {
/// Contains the status about a CreateAccount or CreateGovCloudAccount request to create an AWS account or an AWS GovCloud (US) account in an organization.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateAccountStatus {
    /// If the account was created successfully, the unique identifier (ID) of the new account. The regex pattern for an account ID string requires exactly 12 digits.
    #[serde(rename = "AccountId")]
@@ -198,7 +197,7 @@ pub struct CreateGovCloudAccountRequest {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateGovCloudAccountResponse {
     #[serde(rename = "CreateAccountStatus")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub create_account_status: Option<CreateAccountStatus>,
 }
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CreateOrganizationRequest {
-    /// Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality. • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated billing (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-cb-only) in the AWS Organizations User Guide. The consolidated billing feature subset isn't available for organizations in the AWS GovCloud (US) Region. • ALL: In addition to all the features supported by the consolidated billing feature set, the master account can also apply any type of policy to any member account in the organization. For more information, see All features (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-all) in the AWS Organizations User Guide.
+    /// Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality. • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated billing (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-cb-only) in the AWS Organizations User Guide. The consolidated billing feature subset isn't available for organizations in the AWS GovCloud (US) Region. • ALL: In addition to all the features supported by the consolidated billing feature set, the master account can also apply any policy type to any member account in the organization. For more information, see All features (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-all) in the AWS Organizations User Guide.
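A sketch of creating an organization with the ALL feature set; the string values mirror the API's FeatureSet constants, and `feature_set` is the one field of the request struct in this diff:

```rust
use rusoto_core::Region;
use rusoto_organizations::{CreateOrganizationRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = CreateOrganizationRequest {
        // "ALL" enables every feature; "CONSOLIDATED_BILLING" enables billing only.
        feature_set: Some("ALL".to_string()),
    };
    match client.create_organization(request).sync() {
        Ok(response) => println!("{:?}", response.organization),
        Err(error) => eprintln!("CreateOrganization failed: {}", error),
    }
}
```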

#[serde(rename = "FeatureSet")] #[serde(skip_serializing_if = "Option::is_none")] pub feature_set: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateOrganizationResponse { ///

A structure that contains details about the newly created organization.

#[serde(rename = "Organization")] @@ -227,13 +226,13 @@ pub struct CreateOrganizationalUnitRequest { ///

The friendly name to assign to the new OU.

#[serde(rename = "Name")] pub name: String, - ///

The unique identifier (ID) of the parent root or OU that you want to create the new OU in.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the parent root or OU that you want to create the new OU in.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "ParentId")] pub parent_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateOrganizationalUnitResponse { ///

A structure that contains details about the newly created OU.

#[serde(rename = "OrganizationalUnit")] @@ -258,7 +257,7 @@ pub struct CreatePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePolicyResponse { ///

A structure that contains details about the newly created policy.

#[serde(rename = "Policy")] @@ -274,7 +273,7 @@ pub struct DeclineHandshakeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeclineHandshakeResponse { ///

A structure that contains details about the declined handshake. The state is updated to show the value DECLINED.

#[serde(rename = "Handshake")] @@ -304,7 +303,7 @@ pub struct DescribeAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAccountResponse { ///

A structure that contains information about the requested account.

#[serde(rename = "Account")] @@ -314,13 +313,13 @@ pub struct DescribeAccountResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeCreateAccountStatusRequest { - ///

Specifies the operationId that uniquely identifies the request. You can get the ID from the response to an earlier CreateAccount request, or from the ListCreateAccountStatus operation.

The regex pattern for an create account request ID string requires "car-" followed by from 8 to 32 lower-case letters or digits.

+ ///

Specifies the operationId that uniquely identifies the request. You can get the ID from the response to an earlier CreateAccount request, or from the ListCreateAccountStatus operation.

The regex pattern for a create account request ID string requires "car-" followed by from 8 to 32 lower-case letters or digits.

#[serde(rename = "CreateAccountRequestId")] pub create_account_request_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCreateAccountStatusResponse { ///

A structure that contains the current status of an account creation request.

#[serde(rename = "CreateAccountStatus")] @@ -336,7 +335,7 @@ pub struct DescribeHandshakeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeHandshakeResponse { ///

A structure that contains information about the specified handshake.

#[serde(rename = "Handshake")] @@ -345,7 +344,7 @@ pub struct DescribeHandshakeResponse { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOrganizationResponse { ///

A structure that contains information about the organization.

#[serde(rename = "Organization")] @@ -361,7 +360,7 @@ pub struct DescribeOrganizationalUnitRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOrganizationalUnitResponse { ///

A structure that contains details about the specified OU.

#[serde(rename = "OrganizationalUnit")] @@ -377,7 +376,7 @@ pub struct DescribePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePolicyResponse { ///

A structure that contains details about the specified policy.

#[serde(rename = "Policy")] @@ -390,7 +389,7 @@ pub struct DetachPolicyRequest { ///

The unique identifier (ID) of the policy you want to detach. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits.

#[serde(rename = "PolicyId")] pub policy_id: String, - ///

The unique identifier (ID) of the root, OU, or account that you want to detach the policy from. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations.

The regex pattern for a target ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Account: a string that consists of exactly 12 digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the root, OU, or account that you want to detach the policy from. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations.

The regex pattern for a target ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Account - A string that consists of exactly 12 digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "TargetId")] pub target_id: String, } @@ -413,7 +412,7 @@ pub struct DisablePolicyTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisablePolicyTypeResponse { ///

A structure that shows the root with the updated list of enabled policy types.

#[serde(rename = "Root")] @@ -432,7 +431,7 @@ pub struct EnableAWSServiceAccessRequest { pub struct EnableAllFeaturesRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableAllFeaturesResponse { ///

A structure that contains details about the handshake created to support this request to enable all features in the organization.

#[serde(rename = "Handshake")] @@ -451,7 +450,7 @@ pub struct EnablePolicyTypeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnablePolicyTypeResponse { ///

A structure that shows the root with the updated list of enabled policy types.

#[serde(rename = "Root")] @@ -461,7 +460,7 @@ pub struct EnablePolicyTypeResponse { ///

A structure that contains details of a service principal that is enabled to integrate with AWS Organizations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnabledServicePrincipal { ///

The date that the service principal was enabled for integration with AWS Organizations.

#[serde(rename = "DateEnabled")] @@ -475,7 +474,7 @@ pub struct EnabledServicePrincipal { ///

Contains information that must be exchanged to securely establish a relationship between two accounts (an originator and a recipient). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses.

Note: Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state. After that, they are deleted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Handshake { ///

The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported:

  • INVITE: This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts.

  • ENABLE_ALL_FEATURES: This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only invited member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred.

  • APPROVE_ALL_FEATURES: This type of handshake is sent from the Organizations service when all member accounts have approved the ENABLE_ALL_FEATURES invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features.

#[serde(rename = "Action")] @@ -537,7 +536,7 @@ pub struct HandshakeParty { ///

Contains additional data that is needed to process a handshake.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HandshakeResource { ///

When needed, contains an additional array of HandshakeResource objects.

#[serde(rename = "Resources")] @@ -565,7 +564,7 @@ pub struct InviteAccountToOrganizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InviteAccountToOrganizationResponse { ///

A structure that contains details about the handshake that is created to support this invitation request.

#[serde(rename = "Handshake")] @@ -586,7 +585,7 @@ pub struct ListAWSServiceAccessForOrganizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAWSServiceAccessForOrganizationResponse { ///

A list of the service principals for the services that are enabled to integrate with your organization. Each principal is a structure that includes the name and the date that it was enabled for integration with AWS Organizations.

#[serde(rename = "EnabledServicePrincipals")] @@ -614,7 +613,7 @@ pub struct ListAccountsForParentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAccountsForParentResponse { ///

A list of the accounts in the specified root or OU.

#[serde(rename = "Accounts")] @@ -639,7 +638,7 @@ pub struct ListAccountsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAccountsResponse { ///

A list of objects in the organization.

#[serde(rename = "Accounts")] @@ -664,13 +663,13 @@ pub struct ListChildrenRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The unique identifier (ID) for the parent root or OU whose children you want to list.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) for the parent root or OU whose children you want to list.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "ParentId")] pub parent_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListChildrenResponse { ///

The list of children of the specified parent container.

#[serde(rename = "Children")] @@ -699,7 +698,7 @@ pub struct ListCreateAccountStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCreateAccountStatusResponse { ///

A list of objects with details about the requests. Certain elements, such as the accountId number, are present in the output only after the account has been successfully created.

#[serde(rename = "CreateAccountStatuses")] @@ -728,7 +727,7 @@ pub struct ListHandshakesForAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHandshakesForAccountResponse { ///

A list of Handshake objects with details about each of the handshakes that is associated with the specified account.

#[serde(rename = "Handshakes")] @@ -757,7 +756,7 @@ pub struct ListHandshakesForOrganizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHandshakesForOrganizationResponse { ///

A list of Handshake objects with details about each of the handshakes that are associated with an organization.

#[serde(rename = "Handshakes")] @@ -779,13 +778,13 @@ pub struct ListOrganizationalUnitsForParentRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The unique identifier (ID) of the root or OU whose child OUs you want to list.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the root or OU whose child OUs you want to list.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "ParentId")] pub parent_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOrganizationalUnitsForParentResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -799,7 +798,7 @@ pub struct ListOrganizationalUnitsForParentResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListParentsRequest { - ///

The unique identifier (ID) of the OU or account whose parent containers you want to list. Don't specify a root.

The regex pattern for a child ID string requires one of the following:

  • Account: a string that consists of exactly 12 digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the OU or account whose parent containers you want to list. Don't specify a root.

The regex pattern for a child ID string requires one of the following:

  • Account - A string that consists of exactly 12 digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "ChildId")] pub child_id: String, ///

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

@@ -813,7 +812,7 @@ pub struct ListParentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListParentsResponse { ///
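The MaxResults/NextToken contract described above is shared by all of the List* operations here: pass the token back until it comes back as None. A sketch that pages through ListAccounts (blocking `.sync()` style assumed):

use rusoto_core::Region;
use rusoto_organizations::{ListAccountsRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let response = client
            .list_accounts(ListAccountsRequest {
                max_results: Some(20),
                next_token: next_token.clone(),
            })
            .sync()
            .expect("list_accounts failed");
        for account in response.accounts.unwrap_or_default() {
            println!("{:?} {:?}", account.id, account.name);
        }
        // A present NextToken means more pages remain, even if this
        // page came back with fewer than max_results entries.
        next_token = response.next_token;
        if next_token.is_none() {
            break;
        }
    }
}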

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -838,13 +837,13 @@ pub struct ListPoliciesForTargetRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list.

The regex pattern for a target ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Account: a string that consists of exactly 12 digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list.

The regex pattern for a target ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Account - A string that consists of exactly 12 digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "TargetId")] pub target_id: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPoliciesForTargetResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -872,7 +871,7 @@ pub struct ListPoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPoliciesResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -897,7 +896,7 @@ pub struct ListRootsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRootsResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -921,7 +920,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -949,7 +948,7 @@ pub struct ListTargetsForPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTargetsForPolicyResponse { ///

If present, this value indicates that there is more output available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

#[serde(rename = "NextToken")] @@ -966,17 +965,17 @@ pub struct MoveAccountRequest { ///

The unique identifier (ID) of the account that you want to move.

The regex pattern for an account ID string requires exactly 12 digits.

#[serde(rename = "AccountId")] pub account_id: String, - ///

The unique identifier (ID) of the root or organizational unit that you want to move the account to.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the root or organizational unit that you want to move the account to.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "DestinationParentId")] pub destination_parent_id: String, - ///

The unique identifier (ID) of the root or organizational unit that you want to move the account from.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

+ ///

The unique identifier (ID) of the root or organizational unit that you want to move the account from.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "SourceParentId")] pub source_parent_id: String, } ///

Contains details about an organization. An organization is a collection of accounts that are centrally managed together using consolidated billing, organized hierarchically with organizational units (OUs), and controlled with policies.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Organization { ///

The Amazon Resource Name (ARN) of an organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

#[serde(rename = "Arn")] @@ -1010,7 +1009,7 @@ pub struct Organization { ///

Contains details about an organizational unit (OU). An OU is a container of AWS accounts within a root of an organization. Policies that are attached to an OU apply to all accounts contained in that OU and in any child OUs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OrganizationalUnit { ///

The Amazon Resource Name (ARN) of this OU.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

#[serde(rename = "Arn")] @@ -1028,7 +1027,7 @@ pub struct OrganizationalUnit { ///

Contains information about either a root or an organizational unit (OU) that can contain OUs or accounts in an organization.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Parent { ///

The unique identifier (ID) of the parent entity.

The regex pattern for a parent ID string requires one of the following:

  • Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.

#[serde(rename = "Id")] @@ -1042,7 +1041,7 @@ pub struct Parent { ///

Contains rules to be applied to the affected accounts. Policies can be attached directly to accounts, or to roots and OUs to affect all accounts in those hierarchies.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Policy { ///

The text content of the policy.

#[serde(rename = "Content")] @@ -1056,7 +1055,7 @@ pub struct Policy { ///

Contains information about a policy, but does not include the content. To see the content of a policy, see DescribePolicy.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicySummary { ///

The Amazon Resource Name (ARN) of the policy.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

#[serde(rename = "Arn")] @@ -1086,7 +1085,7 @@ pub struct PolicySummary { ///

Contains information about a root, OU, or account that a policy is attached to.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyTargetSummary { ///

The Amazon Resource Name (ARN) of the policy target.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

#[serde(rename = "Arn")] @@ -1108,7 +1107,7 @@ pub struct PolicyTargetSummary { ///

Contains information about a policy type and its status in the associated root.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PolicyTypeSummary { ///

The status of the policy type as it relates to the associated root. To attach a policy of the specified type to a root or to an OU or account in that root, it must be available in the organization and enabled for that root.

#[serde(rename = "Status")] @@ -1129,7 +1128,7 @@ pub struct RemoveAccountFromOrganizationRequest { ///

Contains details about a root. A root is a top-level parent node in the hierarchy of an organization that can contain organizational units (OUs) and accounts. Every root contains every AWS account in the organization. Each root enables the accounts to be organized in a different way and to have different policy types enabled for use in that root.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Root { ///

The Amazon Resource Name (ARN) of the root.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

#[serde(rename = "Arn")] @@ -1154,12 +1153,10 @@ pub struct Root { pub struct Tag { ///

The key identifier, or name, of the tag.

#[serde(rename = "Key")] - #[serde(skip_serializing_if = "Option::is_none")] - pub key: Option, - ///

The string value that's associated with the key of the tag.

+ pub key: String, + ///

The string value that's associated with the key of the tag. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

#[serde(rename = "Value")] - #[serde(skip_serializing_if = "Option::is_none")] - pub value: Option, + pub value: String, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1167,7 +1164,7 @@ pub struct TagResourceRequest { ///

The ID of the resource to add a tag to.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The tag to add to the specified resource.

+ ///

The tag to add to the specified resource. Specifying the tag key is required. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

#[serde(rename = "Tags")] pub tags: Vec, } @@ -1194,7 +1191,7 @@ pub struct UpdateOrganizationalUnitRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateOrganizationalUnitResponse { ///

A structure that contains the details about the specified OU, including its new name.

#[serde(rename = "OrganizationalUnit")] @@ -1222,7 +1219,7 @@ pub struct UpdatePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePolicyResponse { ///

A structure that contains details about the updated policy, showing the requested changes.

#[serde(rename = "Policy")] @@ -1249,7 +1246,7 @@ pub enum AcceptHandshakeError { HandshakeNotFound(String), ///

You can't perform the operation on the handshake in its current state. For example, you can't cancel a handshake that was already accepted or accept a handshake that was already declined.

InvalidHandshakeTransition(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -1348,7 +1345,7 @@ pub enum AttachPolicyError { ConstraintViolation(String), ///
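Because these are plain enum variants carrying the reason text, callers can match on them directly. A sketch for AcceptHandshake, assuming the rusoto error model in which service-specific errors arrive wrapped in RusotoError::Service, plus the blocking `.sync()` style:

use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    AcceptHandshakeError, AcceptHandshakeRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = AcceptHandshakeRequest {
        handshake_id: "h-examplehandshakeid".to_owned(), // placeholder
    };
    match client.accept_handshake(request).sync() {
        Ok(response) => println!("accepted: {:?}", response.handshake),
        // InvalidInput carries one of the reason codes listed above.
        Err(RusotoError::Service(AcceptHandshakeError::InvalidInput(reason))) => {
            eprintln!("invalid input: {}", reason);
        }
        // Service signals a transient internal error; the docs advise retrying.
        Err(RusotoError::Service(AcceptHandshakeError::Service(message))) => {
            eprintln!("internal error, retry later: {}", message);
        }
        Err(other) => eprintln!("accept_handshake failed: {}", other),
    }
}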

The selected policy is already attached to the specified target.

DuplicatePolicyAttachment(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find a policy with the PolicyId that you specified.

PolicyNotFound(String), @@ -1445,7 +1442,7 @@ pub enum CancelHandshakeError { HandshakeNotFound(String), ///

You can't perform the operation on the handshake in its current state. For example, you can't cancel a handshake that was already accepted or accept a handshake that was already declined.

InvalidHandshakeTransition(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -1526,7 +1523,7 @@ pub enum CreateAccountError { ConstraintViolation(String), ///

AWS Organizations couldn't perform the operation because your organization hasn't finished initializing. This can take up to an hour. Try again later. If after one hour you continue to receive this error, contact AWS Support.

FinalizingOrganization(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -1615,7 +1612,7 @@ pub enum CreateGovCloudAccountError { ConstraintViolation(String), ///

AWS Organizations couldn't perform the operation because your organization hasn't finished initializing. This can take up to an hour. Try again later. If after one hour you continue to receive this error, contact AWS Support.

FinalizingOrganization(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -1708,7 +1705,7 @@ pub enum CreateOrganizationError { ConcurrentModification(String), ///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.
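The generated variants carry only the raw message String, so a caller that needs to branch on one of the reason codes above has to look for the code in the message text. A sketch under the same rusoto ~0.40 assumptions, where service errors arrive wrapped in `rusoto_core::RusotoError::Service`:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    CreateOrganizationError, CreateOrganizationRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let result = client
        .create_organization(CreateOrganizationRequest {
            feature_set: Some("ALL".to_owned()),
            ..Default::default()
        })
        .sync();
    match result {
        Ok(resp) => println!("created: {:?}", resp.organization),
        // The reason code is only available as a substring of the message.
        Err(RusotoError::Service(CreateOrganizationError::ConstraintViolation(msg)))
            if msg.contains("ACCOUNT_NUMBER_LIMIT_EXCEEDED") =>
        {
            eprintln!("at the account limit; ask AWS Support for an increase")
        }
        Err(e) => eprintln!("create_organization failed: {}", e),
    }
}
```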

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -1791,7 +1788,7 @@ pub enum CreateOrganizationalUnitError {

ConstraintViolation(String), ///

An OU with the same name already exists.

DuplicateOrganizationalUnit(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
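For CreateOrganizationalUnit, DuplicateOrganizationalUnit and ParentNotFound are the variants a caller most plausibly wants to tell apart from plain input errors. A sketch (same rusoto ~0.40 assumptions as the earlier examples; the IDs are placeholders):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    CreateOrganizationalUnitError, CreateOrganizationalUnitRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let result = client
        .create_organizational_unit(CreateOrganizationalUnitRequest {
            name: "Engineering".to_owned(),
            parent_id: "r-examplerootid111".to_owned(), // placeholder root ID
            ..Default::default()
        })
        .sync();
    match result {
        Ok(resp) => println!("created OU: {:?}", resp.organizational_unit),
        Err(RusotoError::Service(
            CreateOrganizationalUnitError::DuplicateOrganizationalUnit(msg),
        )) => eprintln!("an OU with that name already exists: {}", msg),
        Err(RusotoError::Service(CreateOrganizationalUnitError::ParentNotFound(msg))) => {
            eprintln!("no root or OU with that ParentId: {}", msg)
        }
        Err(e) => eprintln!("create_organizational_unit failed: {}", e),
    }
}
```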

InvalidInput(String), ///

We can't find a root or OU with the ParentId that you specified.

ParentNotFound(String),

@@ -1888,7 +1885,7 @@ pub enum CreatePolicyError {

ConstraintViolation(String), ///

A policy with the same name already exists.

DuplicatePolicy(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
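For CreatePolicy, a syntactically bad SCP surfaces as MalformedPolicyDocument rather than InvalidInput. A sketch (same assumptions; note that rusoto renames the reserved word `type` to `type_`):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    CreatePolicyError, CreatePolicyRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let scp = r#"{"Version":"2012-10-17","Statement":[{"Effect":"Deny","Action":"ec2:*","Resource":"*"}]}"#;
    let result = client
        .create_policy(CreatePolicyRequest {
            content: scp.to_owned(),
            description: "Deny all EC2 actions".to_owned(),
            name: "DenyEc2".to_owned(),
            type_: "SERVICE_CONTROL_POLICY".to_owned(),
            ..Default::default()
        })
        .sync();
    if let Err(RusotoError::Service(CreatePolicyError::MalformedPolicyDocument(msg))) = result {
        // Bad SCP syntax ends up here, with the parser's complaint in the message.
        eprintln!("policy document rejected: {}", msg);
    }
}
```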

InvalidInput(String), ///

The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

MalformedPolicyDocument(String),

@@ -1981,7 +1978,7 @@ pub enum DeclineHandshakeError {

HandshakeNotFound(String), ///

You can't perform the operation on the handshake in its current state. For example, you can't cancel a handshake that was already accepted or accept a handshake that was already declined.

InvalidHandshakeTransition(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
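DeclineHandshake can only act on a handshake that is still open, which is what InvalidHandshakeTransition signals. A sketch (same assumptions; the handshake ID is a placeholder):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DeclineHandshakeError, DeclineHandshakeRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let result = client
        .decline_handshake(DeclineHandshakeRequest {
            handshake_id: "h-examplehandshakeid111".to_owned(),
        })
        .sync();
    match result {
        Ok(resp) => println!("declined: {:?}", resp.handshake),
        // Declining an already-accepted (or already-declined) handshake:
        Err(RusotoError::Service(DeclineHandshakeError::InvalidHandshakeTransition(msg))) => {
            eprintln!("handshake is not in a declinable state: {}", msg)
        }
        Err(e) => eprintln!("decline_handshake failed: {}", e),
    }
}
```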

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -2058,7 +2055,7 @@ pub enum DeleteOrganizationError {

AccessDenied(String), ///

The target of the operation is currently being modified by a different request. Try again later.

ConcurrentModification(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
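The DeleteOrganization API has no request parameters, so presumably the generated method takes no input and returns the unit type; OrganizationNotEmpty is the variant that tells you cleanup is still pending. A sketch under those assumptions:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{DeleteOrganizationError, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    // Acts on the organization the caller's credentials belong to.
    match client.delete_organization().sync() {
        Ok(()) => println!("organization deleted"),
        Err(RusotoError::Service(DeleteOrganizationError::OrganizationNotEmpty(msg))) => {
            // Remove member accounts, OUs, and policies first, then retry.
            eprintln!("organization still has content: {}", msg)
        }
        Err(e) => eprintln!("delete_organization failed: {}", e),
    }
}
```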

InvalidInput(String), ///

The organization isn't empty. To delete an organization, you must first remove all accounts except the master account, delete all OUs, and delete all policies.

OrganizationNotEmpty(String),

@@ -2133,7 +2130,7 @@ pub enum DeleteOrganizationalUnitError {

AccessDenied(String), ///

The target of the operation is currently being modified by a different request. Try again later.

ConcurrentModification(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
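ConcurrentModification's documented remedy is simply "try again later", which suggests a bounded retry with backoff. A sketch (same assumptions; the OU ID is a placeholder and the delete is assumed to return the unit type, since the API has no response fields):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DeleteOrganizationalUnitError, DeleteOrganizationalUnitRequest, Organizations,
    OrganizationsClient,
};
use std::{thread, time::Duration};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    for attempt in 0..5 {
        let result = client
            .delete_organizational_unit(DeleteOrganizationalUnitRequest {
                organizational_unit_id: "ou-examplerootid111-exampleouid111".to_owned(),
            })
            .sync();
        match result {
            Ok(()) => return,
            // Another request is mutating the same target; back off and retry.
            Err(RusotoError::Service(
                DeleteOrganizationalUnitError::ConcurrentModification(_),
            )) => thread::sleep(Duration::from_secs(1 << attempt)),
            Err(e) => {
                eprintln!("delete_organizational_unit failed: {}", e);
                return;
            }
        }
    }
}
```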

InvalidInput(String), ///

The specified OU is not empty. Move all accounts to another root or to other OUs, remove all child OUs, and try the operation again.

OrganizationalUnitNotEmpty(String),

@@ -2222,7 +2219,7 @@ pub enum DeletePolicyError {

AccessDenied(String), ///

The target of the operation is currently being modified by a different request. Try again later.

ConcurrentModification(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
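PolicyInUse means the delete must be preceded by DetachPolicy calls. A sketch with a single hard-coded target (same assumptions; a real caller would enumerate attachments, e.g. via the ListTargetsForPolicy operation, rather than hard-coding a root ID):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DeletePolicyError, DeletePolicyRequest, DetachPolicyRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let policy_id = "p-examplepolicyid111".to_owned(); // placeholder
    let result = client
        .delete_policy(DeletePolicyRequest {
            policy_id: policy_id.clone(),
        })
        .sync();
    if let Err(RusotoError::Service(DeletePolicyError::PolicyInUse(_))) = result {
        // Detach from every root, OU, and account first, then delete again.
        client
            .detach_policy(DetachPolicyRequest {
                policy_id: policy_id.clone(),
                target_id: "r-examplerootid111".to_owned(), // placeholder target
            })
            .sync()
            .expect("detach_policy failed");
        client
            .delete_policy(DeletePolicyRequest { policy_id })
            .sync()
            .expect("delete_policy failed");
    }
}
```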

InvalidInput(String), ///

The policy is attached to one or more entities. You must detach it from all roots, OUs, and accounts before performing this operation.

PolicyInUse(String),

@@ -2299,7 +2296,7 @@ pub enum DescribeAccountError {

AccessDenied(String), ///

We can't find an AWS account with the AccountId that you specified, or the account whose credentials you used to make this request isn't a member of an organization.

AccountNotFound(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
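For the read-only Describe* calls, AccountNotFound is usually the only variant worth special-casing. A sketch (same assumptions; the account ID is a placeholder):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DescribeAccountError, DescribeAccountRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let result = client
        .describe_account(DescribeAccountRequest {
            account_id: "111111111111".to_owned(),
        })
        .sync();
    match result {
        Ok(resp) => println!("account: {:?}", resp.account),
        // Distinguish "no such account" from every other failure.
        Err(RusotoError::Service(DescribeAccountError::AccountNotFound(msg))) => {
            eprintln!("no account with that AccountId: {}", msg)
        }
        Err(e) => eprintln!("describe_account failed: {}", e),
    }
}
```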

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -2364,7 +2361,7 @@ pub enum DescribeCreateAccountStatusError {

AccessDenied(String), ///

We can't find a create account request with the CreateAccountRequestId that you specified.

CreateAccountStatusNotFound(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
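CreateAccount is asynchronous on the AWS side: the call returns a CreateAccountStatus whose Id is then fed to DescribeCreateAccountStatus until the state leaves IN_PROGRESS. A polling sketch (same assumptions; the field and state names follow the AWS API and are assumptions about the generated structs):

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    CreateAccountRequest, DescribeCreateAccountStatusRequest, Organizations, OrganizationsClient,
};
use std::{thread, time::Duration};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    // Kick off the asynchronous account creation and keep its request ID.
    let status = client
        .create_account(CreateAccountRequest {
            account_name: "Example Member".to_owned(),
            email: "member@example.com".to_owned(),
            ..Default::default()
        })
        .sync()
        .expect("create_account failed")
        .create_account_status
        .expect("no status returned");
    let request_id = status.id.expect("status carries the request ID");
    loop {
        let status = client
            .describe_create_account_status(DescribeCreateAccountStatusRequest {
                create_account_request_id: request_id.clone(),
            })
            .sync()
            .expect("describe_create_account_status failed")
            .create_account_status
            .expect("no status returned");
        let state = status.state.unwrap_or_default();
        if state == "IN_PROGRESS" {
            thread::sleep(Duration::from_secs(5));
        } else {
            // SUCCEEDED or FAILED; on FAILED the status includes a failure reason.
            println!("final state: {}", state);
            break;
        }
    }
}
```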

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -2447,7 +2444,7 @@ pub enum DescribeHandshakeError {

ConcurrentModification(String), ///

We can't find a handshake with the HandshakeId that you specified.

HandshakeNotFound(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
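HandshakeNotFound covers both a mistyped ID and a handshake that is no longer retained. A sketch (same assumptions; placeholder ID):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DescribeHandshakeError, DescribeHandshakeRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let result = client
        .describe_handshake(DescribeHandshakeRequest {
            handshake_id: "h-examplehandshakeid111".to_owned(),
        })
        .sync();
    match result {
        Ok(resp) => println!("handshake: {:?}", resp.handshake),
        Err(RusotoError::Service(DescribeHandshakeError::HandshakeNotFound(msg))) => {
            eprintln!("no handshake with that HandshakeId: {}", msg)
        }
        Err(e) => eprintln!("describe_handshake failed: {}", e),
    }
}
```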

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -2573,7 +2570,7 @@ pub enum DescribeOrganizationalUnitError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation (a short handling sketch in Rust follows the list):

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find an OU with the OrganizationalUnitId that you specified.

OrganizationalUnitNotFound(String), @@ -2648,7 +2645,7 @@ pub enum DescribePolicyError { AWSOrganizationsNotInUse(String), ///
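Since the variants above are plain `String`-carrying enum cases, callers normally match on them through `RusotoError::Service`. Below is a minimal, hypothetical sketch of that pattern, assuming a pre-async rusoto in the 0.40 line (operations return a `RusotoFuture` with a blocking `.sync()`); the region and OU ID are placeholders.

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DescribeOrganizationalUnitError, DescribeOrganizationalUnitRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = DescribeOrganizationalUnitRequest {
        // Placeholder OU ID; substitute one from your own organization.
        organizational_unit_id: "ou-examplerootid111-exampleouid111".to_string(),
    };
    match client.describe_organizational_unit(request).sync() {
        Ok(resp) => println!("OU: {:?}", resp.organizational_unit),
        // Each service variant carries the server-supplied message, which for
        // InvalidInput includes the reason code (e.g. INVALID_PATTERN_TARGET_ID).
        Err(RusotoError::Service(DescribeOrganizationalUnitError::OrganizationalUnitNotFound(msg))) => {
            eprintln!("no such OU: {}", msg)
        }
        Err(RusotoError::Service(DescribeOrganizationalUnitError::InvalidInput(msg))) => {
            eprintln!("bad request parameter: {}", msg)
        }
        Err(e) => eprintln!("other failure: {:?}", e),
    }
}
```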

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find a policy with the PolicyId that you specified.

PolicyNotFound(String), @@ -2717,7 +2714,7 @@ pub enum DetachPolicyError { ConcurrentModification(String), ///
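The INVALID_PAGINATION_TOKEN reason documented above only arises when a stale or hand-altered token is passed back; the usual pattern is to feed each response's next_token straight into the following request. A hypothetical sketch under the same rusoto-0.40 assumptions as earlier (the SERVICE_CONTROL_POLICY filter value is the one the Organizations API defines for SCPs):

```rust
use rusoto_core::Region;
use rusoto_organizations::{ListPoliciesRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListPoliciesRequest {
            filter: "SERVICE_CONTROL_POLICY".to_string(),
            // Reuse the token exactly as returned; editing or reusing an old
            // token is what triggers INVALID_PAGINATION_TOKEN.
            next_token: next_token.clone(),
            ..Default::default()
        };
        let page = client.list_policies(request).sync().expect("list_policies failed");
        for policy in page.policies.unwrap_or_default() {
            println!("{:?} {:?}", policy.id, policy.name);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break; // no more pages
        }
    }
}
```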

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

The policy isn't attached to the specified target in the specified root.

PolicyNotAttached(String), @@ -2806,7 +2803,7 @@ pub enum DisableAWSServiceAccessError { ConcurrentModification(String), ///
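Because PolicyNotAttached is its own variant, a cleanup job can treat "already detached" as success while still surfacing real constraint violations. A hypothetical sketch under the same rusoto-0.40 assumptions as earlier (the policy and target IDs are placeholders):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DetachPolicyError, DetachPolicyRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = DetachPolicyRequest {
        policy_id: "p-examplepolicyid111".to_string(),
        target_id: "ou-examplerootid111-exampleouid111".to_string(),
    };
    match client.detach_policy(request).sync() {
        // Either we detached it now, or it was never attached: for an
        // idempotent cleanup pass, both count as success.
        Ok(()) | Err(RusotoError::Service(DetachPolicyError::PolicyNotAttached(_))) => {
            println!("policy is no longer attached to the target")
        }
        // e.g. MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED when removing the last SCP.
        Err(RusotoError::Service(DetachPolicyError::ConstraintViolation(msg))) => {
            eprintln!("constraint violation: {}", msg)
        }
        Err(e) => eprintln!("detach failed: {:?}", e),
    }
}
```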

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -2887,7 +2884,7 @@ pub enum DisablePolicyTypeError { ConcurrentModification(String), ///
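Service and ConcurrentModification are the retryable cases here: the documentation for both says to try the call again. A small hypothetical backoff wrapper under the same rusoto-0.40 assumptions (the service principal and retry schedule are placeholders):

```rust
use std::{thread, time::Duration};

use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DisableAWSServiceAccessError, DisableAWSServiceAccessRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    for attempt in 0..5u32 {
        let request = DisableAWSServiceAccessRequest {
            service_principal: "config.amazonaws.com".to_string(), // placeholder
        };
        match client.disable_aws_service_access(request).sync() {
            Ok(()) => return,
            // Transient per the docs above: back off exponentially and retry.
            Err(RusotoError::Service(DisableAWSServiceAccessError::Service(_)))
            | Err(RusotoError::Service(DisableAWSServiceAccessError::ConcurrentModification(_))) => {
                thread::sleep(Duration::from_millis(200 * 2u64.pow(attempt)));
            }
            Err(e) => panic!("disable_aws_service_access failed: {:?}", e),
        }
    }
    panic!("still failing after retries");
}
```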

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

PolicyTypeNotEnabled(String), @@ -2976,7 +2973,7 @@ pub enum EnableAWSServiceAccessError { ConcurrentModification(String), ///
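The dedicated PolicyTypeNotEnabled variant makes disable_policy_type easy to use idempotently: if the type is already off in that root, the work is done. A hypothetical sketch under the same rusoto-0.40 assumptions (the root ID is a placeholder):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    DisablePolicyTypeError, DisablePolicyTypeRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = DisablePolicyTypeRequest {
        policy_type: "SERVICE_CONTROL_POLICY".to_string(),
        root_id: "r-examplerootid111".to_string(), // placeholder
    };
    match client.disable_policy_type(request).sync() {
        Ok(resp) => println!("now disabled; root: {:?}", resp.root),
        // Already disabled in this root: nothing left to do.
        Err(RusotoError::Service(DisablePolicyTypeError::PolicyTypeNotEnabled(_))) => {
            println!("SCPs were already disabled in this root")
        }
        Err(e) => eprintln!("disable_policy_type failed: {:?}", e),
    }
}
```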

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -3053,7 +3050,7 @@ pub enum EnableAllFeaturesError { ConcurrentModification(String), ///
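Enabling trusted access is the mirror image of the call above and fails with the same families of errors; the ConstraintViolation reason worth calling out is ORGANIZATION_NOT_IN_ALL_FEATURES_MODE, since trusted service access requires all-features mode. A hypothetical sketch under the same rusoto-0.40 assumptions (the service principal is a placeholder):

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    EnableAWSServiceAccessError, EnableAWSServiceAccessRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = EnableAWSServiceAccessRequest {
        // Placeholder: the principal of the service being granted trusted access.
        service_principal: "cloudtrail.amazonaws.com".to_string(),
    };
    match client.enable_aws_service_access(request).sync() {
        Ok(()) => println!("trusted access enabled"),
        Err(RusotoError::Service(EnableAWSServiceAccessError::ConstraintViolation(msg))) => {
            // Often ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: enable all features
            // first (see EnableAllFeatures below).
            eprintln!("constraint violation: {}", msg)
        }
        Err(e) => eprintln!("enable_aws_service_access failed: {:?}", e),
    }
}
```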

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

    If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

  • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

  • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

HandshakeConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String), @@ -3128,7 +3125,7 @@ pub enum EnablePolicyTypeError { ConcurrentModification(String), ///
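EnableAllFeatures only starts a handshake; the change becomes final once every invited account accepts. A caller that reruns the operation can treat ORGANIZATION_ALREADY_HAS_ALL_FEATURES as benign. A hypothetical sketch under the same rusoto-0.40 assumptions; note that matching on the message text is itself an assumption about how the reason code is surfaced:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    EnableAllFeaturesError, EnableAllFeaturesRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    match client.enable_all_features(EnableAllFeaturesRequest {}).sync() {
        Ok(resp) => println!("handshake started: {:?}", resp.handshake),
        // ASSUMPTION: the reason code appears verbatim in the error message;
        // verify against real responses before relying on this.
        Err(RusotoError::Service(EnableAllFeaturesError::HandshakeConstraintViolation(msg)))
            if msg.contains("ORGANIZATION_ALREADY_HAS_ALL_FEATURES") =>
        {
            println!("all features already enabled; nothing to do")
        }
        Err(e) => eprintln!("enable_all_features failed: {:?}", e),
    }
}
```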

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

The specified policy type is already enabled in the specified root.

PolicyTypeAlreadyEnabled(String),
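A minimal sketch of calling InviteAccountToOrganization, assuming the pre-async rusoto 0.3x API (a `RusotoFuture` with a blocking `.sync()`, and the String-carrying error variants shown in this diff); the email address is a placeholder, and field names should be checked against the crate version in use:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    HandshakeParty, InviteAccountToOrganizationError, InviteAccountToOrganizationRequest,
    Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = InviteAccountToOrganizationRequest {
        notes: Some("Please join our organization.".to_owned()),
        // Invite by email; inviting by account ID uses type_ "ACCOUNT".
        target: HandshakeParty {
            id: "juan@example.com".to_owned(),
            type_: "EMAIL".to_owned(),
        },
        ..Default::default()
    };
    match client.invite_account_to_organization(request).sync() {
        Ok(response) => println!("sent handshake: {:?}", response.handshake),
        // The message carries one of the reason codes documented below,
        // such as ALREADYINANORGANIZATION or HANDSHAKERATELIMITEXCEEDED.
        Err(InviteAccountToOrganizationError::HandshakeConstraintViolation(msg)) => {
            eprintln!("constraint violated: {}", msg)
        }
        Err(other) => eprintln!("invite failed: {:?}", other),
    }
}
```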

@@ -3231,7 +3228,7 @@ pub enum InviteAccountToOrganizationError {
FinalizingOrganization(String),
///

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNTNUMBERLIMITEXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

    If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • ALREADYINANORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • HANDSHAKERATELIMITEXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • INVITEDISABLEDDURINGENABLEALLFEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • ORGANIZATIONALREADYHASALLFEATURES: The handshake request is invalid because the organization has already enabled all features.

  • ORGANIZATIONFROMDIFFERENTSELLEROFRECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATIONMEMBERSHIPCHANGERATELIMITEXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

  • PAYMENTINSTRUMENTREQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

HandshakeConstraintViolation(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),
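A sketch of LeaveOrganization under the same assumptions (synchronous rusoto 0.3x, String-carrying error variants). The operation takes no input, and, per the variant documented below, it always fails when called from the master account:

```rust
use rusoto_core::Region;
use rusoto_organizations::{LeaveOrganizationError, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    match client.leave_organization().sync() {
        Ok(()) => println!("left the organization"),
        Err(LeaveOrganizationError::MasterCannotLeaveOrganization(msg)) => {
            eprintln!("master accounts must delete the organization instead: {}", msg)
        }
        // The message names a reason code such as ACCOUNTCANNOTLEAVEWITHOUTEULA.
        Err(LeaveOrganizationError::ConstraintViolation(msg)) => {
            eprintln!("account cannot stand alone yet: {}", msg)
        }
        Err(other) => eprintln!("leave failed: {:?}", other),
    }
}
```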

@@ -3334,7 +3331,7 @@ pub enum LeaveOrganizationError {
ConcurrentModification(String),
///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNTCANNOTLEAVEWITHOUTEULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNTCANNOTLEAVEWITHOUTPHONEVERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgsmanageaccountsremove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNTCREATIONRATELIMITEXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNTNUMBERLIMITEXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKERATELIMITEXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTERACCOUNTADDRESSDOESNOTMATCHMARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTERACCOUNTMISSINGCONTACTINFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTERACCOUNTNOTGOVCLOUDENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTERACCOUNTPAYMENTINSTRUMENTREQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAXPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAXTAGLIMITEXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBERACCOUNTPAYMENTINSTRUMENTREQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MINPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OUDEPTHLIMITEXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATIONNOTINALLFEATURESMODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OUNUMBERLIMITEXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICYNUMBERLIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

You can't remove a master account from an organization. If you want the master account to become a member account in another organization, you must first delete the current organization of the master account.

MasterCannotLeaveOrganization(String),
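A sketch of ListAWSServiceAccessForOrganization under the same assumptions; the `enabled_service_principals` and `date_enabled` field names follow the generated crate and should be verified against the version in use:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    ListAWSServiceAccessForOrganizationRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = ListAWSServiceAccessForOrganizationRequest::default();
    match client.list_aws_service_access_for_organization(request).sync() {
        Ok(response) => {
            for entry in response.enabled_service_principals.unwrap_or_default() {
                // date_enabled is a raw epoch timestamp in the generated code.
                println!("{:?} enabled at {:?}", entry.service_principal, entry.date_enabled);
            }
        }
        Err(err) => eprintln!("listing failed: {:?}", err),
    }
}
```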

@@ -3419,7 +3416,7 @@ pub enum ListAWSServiceAccessForOrganizationError {
AccessDenied(String),
///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNTCANNOTLEAVEWITHOUTEULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNTCANNOTLEAVEWITHOUTPHONEVERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgsmanageaccountsremove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNTCREATIONRATELIMITEXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNTNUMBERLIMITEXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKERATELIMITEXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTERACCOUNTADDRESSDOESNOTMATCHMARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTERACCOUNTMISSINGCONTACTINFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTERACCOUNTNOTGOVCLOUDENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTERACCOUNTPAYMENTINSTRUMENTREQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAXPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAXTAGLIMITEXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBERACCOUNTPAYMENTINSTRUMENTREQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MINPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OUDEPTHLIMITEXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATIONNOTINALLFEATURESMODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OUNUMBERLIMITEXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICYNUMBERLIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),
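The INVALIDPAGINATIONTOKEN reason above applies to paged calls such as ListAccounts: always feed the NextToken back exactly as the previous response returned it. A minimal sketch under the same synchronous rusoto 0.3x assumptions:

```rust
use rusoto_core::Region;
use rusoto_organizations::{ListAccountsRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListAccountsRequest {
            // Feed the token back unchanged; a reused or edited token is
            // what triggers the INVALIDPAGINATIONTOKEN reason.
            next_token: next_token.clone(),
            ..Default::default()
        };
        match client.list_accounts(request).sync() {
            Ok(response) => {
                for account in response.accounts.unwrap_or_default() {
                    println!("{:?}: {:?}", account.id, account.name);
                }
                next_token = response.next_token;
                if next_token.is_none() {
                    break;
                }
            }
            Err(err) => {
                eprintln!("ListAccounts failed: {:?}", err);
                break;
            }
        }
    }
}
```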

@@ -3494,7 +3491,7 @@ pub enum ListAccountsError {
AWSOrganizationsNotInUse(String),
///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),
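ListAccountsForParent scopes the listing to one root or OU; the ParentNotFound variant documented below fires when the ParentId doesn't resolve. A sketch under the same assumptions, with a hypothetical OU ID:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    ListAccountsForParentError, ListAccountsForParentRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = ListAccountsForParentRequest {
        // Hypothetical OU ID, for illustration only.
        parent_id: "ou-ab12-11111111".to_owned(),
        ..Default::default()
    };
    match client.list_accounts_for_parent(request).sync() {
        Ok(response) => {
            for account in response.accounts.unwrap_or_default() {
                println!("{:?}: {:?}", account.id, account.name);
            }
        }
        Err(ListAccountsForParentError::ParentNotFound(msg)) => {
            eprintln!("no root or OU with that ParentId: {}", msg)
        }
        Err(other) => eprintln!("listing failed: {:?}", other),
    }
}
```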

@@ -3553,7 +3550,7 @@ pub enum ListAccountsForParentError {
AWSOrganizationsNotInUse(String),
///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

We can't find a root or OU with the ParentId that you specified.

ParentNotFound(String),
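ListChildren walks one level of the OU tree at a time, taking a parent ID plus a child type of ACCOUNT or ORGANIZATIONAL_UNIT. A sketch under the same assumptions, with a hypothetical root ID:

```rust
use rusoto_core::Region;
use rusoto_organizations::{ListChildrenRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = ListChildrenRequest {
        child_type: "ORGANIZATIONAL_UNIT".to_owned(), // or "ACCOUNT"
        parent_id: "r-ab12".to_owned(),               // hypothetical root ID
        ..Default::default()
    };
    match client.list_children(request).sync() {
        Ok(response) => {
            for child in response.children.unwrap_or_default() {
                println!("{:?} ({:?})", child.id, child.type_);
            }
        }
        Err(err) => eprintln!("ListChildren failed: {:?}", err),
    }
}
```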

@@ -3622,7 +3619,7 @@ pub enum ListChildrenError {
AWSOrganizationsNotInUse(String),
///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

We can't find a root or OU with the ParentId that you specified.

ParentNotFound(String),
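ListCreateAccountStatus reports on asynchronous account-creation requests; filtering on states narrows the listing to unfinished or failed ones. A sketch under the same assumptions:

```rust
use rusoto_core::Region;
use rusoto_organizations::{ListCreateAccountStatusRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = ListCreateAccountStatusRequest {
        // Restrict the listing to requests that haven't succeeded.
        states: Some(vec!["IN_PROGRESS".to_owned(), "FAILED".to_owned()]),
        ..Default::default()
    };
    match client.list_create_account_status(request).sync() {
        Ok(response) => {
            for status in response.create_account_statuses.unwrap_or_default() {
                println!("{:?}: {:?}", status.account_name, status.state);
            }
        }
        Err(err) => eprintln!("ListCreateAccountStatus failed: {:?}", err),
    }
}
```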

@@ -3687,7 +3684,7 @@ pub enum ListCreateAccountStatusError {
AWSOrganizationsNotInUse(String),
///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),
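ListHandshakesForAccount returns the handshakes visible to the calling account; an optional HandshakeFilter limits it to one action type. A sketch under the same assumptions:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    HandshakeFilter, ListHandshakesForAccountRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = ListHandshakesForAccountRequest {
        // Limit the listing to invitations; ENABLE_ALL_FEATURES is the
        // other common action type.
        filter: Some(HandshakeFilter {
            action_type: Some("INVITE".to_owned()),
            ..Default::default()
        }),
        ..Default::default()
    };
    match client.list_handshakes_for_account(request).sync() {
        Ok(response) => {
            for handshake in response.handshakes.unwrap_or_default() {
                println!("{:?}: {:?}", handshake.id, handshake.state);
            }
        }
        Err(err) => eprintln!("ListHandshakesForAccount failed: {:?}", err),
    }
}
```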

@@ -3760,7 +3757,7 @@ pub enum ListHandshakesForAccountError {
AccessDenied(String),
///

The target of the operation is currently being modified by a different request. Try again later.

ConcurrentModification(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String),

///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -3827,7 +3824,7 @@ pub enum ListHandshakesForOrganizationError {
AccessDenied(String),
///

The target of the operation is currently being modified by a different request. Try again later.

ConcurrentModification(String),

- ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

(A variant-matching sketch follows this list.)
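Because every variant in these generated error enums carries the service's reason string, callers can branch on the reason classes documented above. Below is a minimal, illustrative sketch, not part of this diff, assuming the rusoto 0.40-style blocking `.sync()` call, the `RusotoError` wrapper from `rusoto_core`, and the `ListHandshakesForOrganization` types generated in this file:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    ListHandshakesForOrganizationError, ListHandshakesForOrganizationRequest, Organizations,
    OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    // Generated request structs derive Default, so optional fields can be left unset.
    let request = ListHandshakesForOrganizationRequest::default();
    match client.list_handshakes_for_organization(request).sync() {
        Ok(response) => println!("handshakes: {:?}", response.handshakes),
        // Each variant's String payload is the reason from the service, so the
        // INVALID_* / MAX_* / MIN_* codes listed above can be inspected directly.
        Err(RusotoError::Service(ListHandshakesForOrganizationError::InvalidInput(reason))) => {
            eprintln!("fix the request parameters: {}", reason)
        }
        Err(RusotoError::Service(ListHandshakesForOrganizationError::Service(reason))) => {
            eprintln!("internal service error, retry later: {}", reason)
        }
        Err(other) => eprintln!("other failure: {}", other),
    }
}
```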

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -3902,7 +3899,7 @@ pub enum ListOrganizationalUnitsForParentError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find a root or OU with the ParentId that you specified.

ParentNotFound(String),

@@ -3981,7 +3978,7 @@ pub enum ListParentsError {

AccessDenied(String), ///

We can't find an organizational unit (OU) or AWS account with the ChildId that you specified.

ChildNotFound(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4044,7 +4041,7 @@ pub enum ListPoliciesError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4103,7 +4100,7 @@ pub enum ListPoliciesForTargetError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4172,7 +4169,7 @@ pub enum ListRootsError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation (see the paging sketch after this list).

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.
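The INVALID_PAGINATION_TOKEN reason above implies the usual NextToken contract: send no token for the first page and pass each response's token back until it is absent. A small paging sketch for ListRoots under the same assumptions as before (rusoto 0.40-style `.sync()`, types generated in this file; illustrative only, not part of this diff):

```rust
use rusoto_core::Region;
use rusoto_organizations::{ListRootsRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListRootsRequest {
            max_results: None,
            // None on the first call; thereafter the token from the last response.
            next_token: next_token.clone(),
        };
        match client.list_roots(request).sync() {
            Ok(response) => {
                for root in response.roots.unwrap_or_default() {
                    println!("root: {:?}", root.name);
                }
                next_token = response.next_token;
                if next_token.is_none() {
                    break; // no more pages
                }
            }
            Err(e) => {
                eprintln!("ListRoots failed: {}", e);
                break;
            }
        }
    }
}
```

Reusing a stale or hand-built token is exactly what triggers the INVALID_PAGINATION_TOKEN reason, so the token should always be taken verbatim from the previous response.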

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4229,7 +4226,7 @@ pub enum ListTagsForResourceError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4294,7 +4291,7 @@ pub enum ListTargetsForPolicyError {

AWSOrganizationsNotInUse(String), ///

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

AccessDenied(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find a policy with the PolicyId that you specified.

PolicyNotFound(String),

@@ -4369,7 +4366,7 @@ pub enum MoveAccountError {

DestinationParentNotFound(String), ///

That account is already present in the specified destination.

DuplicateAccount(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),

@@ -4458,7 +4455,7 @@ pub enum RemoveAccountFromOrganizationError {

ConcurrentModification(String), ///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

(A handling sketch for these constraint reasons follows this list.)
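Constraint violations surface the reason codes above as the String payload of the ConstraintViolation variant, so remediation can be driven by inspecting that string. A hedged sketch under the same rusoto 0.40-style assumptions; the `remove_account` helper is hypothetical and not part of this diff:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    Organizations, OrganizationsClient, RemoveAccountFromOrganizationError,
    RemoveAccountFromOrganizationRequest,
};

// Hypothetical helper: removes one member account and reacts to the
// constraint-violation reason codes documented above.
fn remove_account(client: &OrganizationsClient, account_id: &str) {
    let request = RemoveAccountFromOrganizationRequest {
        account_id: account_id.to_owned(),
    };
    match client.remove_account_from_organization(request).sync() {
        Ok(_) => println!("account {} removed", account_id),
        Err(RusotoError::Service(RemoveAccountFromOrganizationError::ConstraintViolation(
            reason,
        ))) => {
            // The reason string embeds codes such as ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA.
            if reason.contains("ACCOUNT_CANNOT_LEAVE_WITHOUT") {
                eprintln!("account needs standalone setup first: {}", reason);
            } else {
                eprintln!("constraint violated: {}", reason);
            }
        }
        Err(RusotoError::Service(
            RemoveAccountFromOrganizationError::MasterCannotLeaveOrganization(reason),
        )) => eprintln!("master account cannot leave: {}", reason),
        Err(other) => eprintln!("RemoveAccountFromOrganization failed: {}", other),
    }
}

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    remove_account(&client, "111122223333");
}
```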

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can't add, edit, or delete system tag keys because they're reserved for AWS use. System tags don't count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

You can't remove a master account from an organization. If you want the master account to become a member account in another organization, you must first delete the current organization of the master account.

MasterCannotLeaveOrganization(String),

@@ -4557,7 +4554,7 @@ pub enum TagResourceError {

ConcurrentModification(String), ///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.

Service(String),
@@ -4634,7 +4631,7 @@ pub enum UntagResourceError {
     ConcurrentModification(String),
     ///

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNTCANNOTLEAVEWITHOUTEULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNTCANNOTLEAVEWITHOUTPHONEVERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNTCREATIONRATELIMITEXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNTNUMBERLIMITEXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKERATELIMITEXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTERACCOUNTADDRESSDOESNOTMATCHMARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTERACCOUNTMISSINGCONTACTINFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTERACCOUNTNOTGOVCLOUDENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTERACCOUNTPAYMENTINSTRUMENTREQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAXPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAXTAGLIMITEXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBERACCOUNTPAYMENTINSTRUMENTREQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MINPOLICYTYPEATTACHMENTLIMITEXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OUDEPTHLIMITEXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATIONNOTINALLFEATURESMODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OUNUMBERLIMITEXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICYNUMBERLIMITEXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

ConstraintViolation(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

AWS Organizations can't complete your request because of an internal service error. Try again later.
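Since every failure reason above is delivered as a message inside one of these enum variants, a caller can branch on them through `RusotoError::Service`. A minimal blocking sketch — assuming the `rusoto_organizations` crate this diff belongs to and its `untag_resource` method; the request field names and placeholder IDs are illustrative assumptions:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_organizations::{
    Organizations, OrganizationsClient, UntagResourceError, UntagResourceRequest,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = UntagResourceRequest {
        resource_id: "111111111111".to_string(), // hypothetical account ID
        tag_keys: vec!["team".to_string()],
    };
    match client.untag_resource(request).sync() {
        Ok(()) => println!("tag removed"),
        // Reason codes such as MAXTAGLIMITEXCEEDED arrive inside the message string.
        Err(RusotoError::Service(UntagResourceError::ConstraintViolation(msg))) => {
            eprintln!("constraint violation: {}", msg);
        }
        Err(RusotoError::Service(UntagResourceError::InvalidInput(msg))) => {
            eprintln!("invalid input: {}", msg);
        }
        Err(err) => eprintln!("request failed: {}", err),
    }
}
```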

Service(String),
@@ -4713,7 +4710,7 @@ pub enum UpdateOrganizationalUnitError {
     ConcurrentModification(String),
     ///

An OU with the same name already exists.

DuplicateOrganizationalUnit(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

We can't find an OU with the OrganizationalUnitId that you specified.

OrganizationalUnitNotFound(String),
@@ -4804,7 +4801,7 @@ pub enum UpdatePolicyError {
     ConstraintViolation(String),
     ///

A policy with the same name already exists.

DuplicatePolicy(String), - ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified a value that isn't valid for that parameter.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

+ ///

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLEPOLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUTREQUIRED: You must include a value for all required parameters.

  • INVALIDENUM: You specified an invalid value.

  • INVALIDFULLNAMETARGET: You specified a full name that contains invalid characters.

  • INVALIDLISTMEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALIDPAGINATIONTOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALIDPARTYTYPETARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALIDPATTERN: You provided a value that doesn't match the required pattern.

  • INVALIDPATTERNTARGETID: You specified a policy target ID that doesn't match the required pattern.

  • INVALIDROLENAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALIDSYNTAXORGANIZATIONARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALIDSYNTAXPOLICYID: You specified an invalid policy ID.

  • INVALIDSYSTEMTAGSPARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAXFILTERLIMITEXCEEDED: You can specify only one filter parameter for the operation.

  • MAXLENGTHEXCEEDED: You provided a string parameter that is longer than allowed.

  • MAXVALUEEXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MINLENGTHEXCEEDED: You provided a string parameter that is shorter than allowed.

  • MINVALUEEXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVINGACCOUNTBETWEENDIFFERENTROOTS: You can move an account only between entities in the same root.

InvalidInput(String), ///

The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

MalformedPolicyDocument(String),
@@ -4890,7 +4887,7 @@ pub trait Organizations {
         input: AcceptHandshakeRequest,
     ) -> RusotoFuture<AcceptHandshakeResponse, AcceptHandshakeError>;

-    ///

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs

    • If you attach the policy directly to an account, it affects only that account

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child can't grant a permission that the paren't hasn't already granted. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They can't use A or B because the child OU filtered them out. They also can't use F and G because the parent OU filtered them out. They can't be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named "FullAWSAccess" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how AWS Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

+ ///

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, it affects only that account.

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). You can attach one SCP to a higher level root or OU, and a different SCP to a child OU or to an account. The child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child can't grant a permission that the parent hasn't already granted. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They can't use A or B because the child OU filtered them out. They also can't use F and G because the parent OU filtered them out. They can't be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named "FullAWSAccess" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how AWS Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.
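To make the call shape concrete, here is a hedged sketch of attaching an SCP with the blocking `.sync()` adapter — assuming the `rusoto_organizations` crate; the example IDs are made up:

```rust
use rusoto_core::Region;
use rusoto_organizations::{AttachPolicyRequest, Organizations, OrganizationsClient};

fn main() {
    // Must run with credentials for the organization's master account.
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = AttachPolicyRequest {
        policy_id: "p-examplepolicyid".to_string(), // hypothetical SCP ID
        target_id: "ou-exampleouid".to_string(),    // a root, OU, or account ID
    };
    match client.attach_policy(request).sync() {
        Ok(()) => println!("policy attached"),
        Err(err) => eprintln!("attach_policy failed: {}", err),
    }
}
```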

fn attach_policy(&self, input: AttachPolicyRequest) -> RusotoFuture<(), AttachPolicyError>; ///

Cancels a handshake. Canceling a handshake sets the handshake state to CANCELED.

This operation can be called only from the account that originated the handshake. The recipient of the handshake can't cancel it, but can use DeclineHandshake instead. After a handshake is canceled, the recipient can no longer respond to that handshake.

After you cancel a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.
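A short sketch of canceling a pending handshake; the handshake ID is a placeholder, and the field names assume the generated `CancelHandshakeRequest` shape:

```rust
use rusoto_core::Region;
use rusoto_organizations::{CancelHandshakeRequest, Organizations, OrganizationsClient};

fn main() {
    // Only the account that originated the handshake can cancel it.
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = CancelHandshakeRequest {
        handshake_id: "h-examplehandshakeid".to_string(), // hypothetical ID
    };
    match client.cancel_handshake(request).sync() {
        // The response echoes the handshake back, now in the CANCELED state.
        Ok(response) => println!("canceled: {:?}", response.handshake),
        Err(err) => eprintln!("cancel_handshake failed: {}", err),
    }
}
```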

@@ -4899,7 +4896,7 @@ pub trait Organizations {
         input: CancelHandshakeRequest,
     ) -> RusotoFuture<CancelHandshakeResponse, CancelHandshakeError>;

-    ///

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA), is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

+ ///

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA), is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.
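A hedged sketch of kicking off the asynchronous account creation and inspecting the returned status object; the field names and the `Default` derive on the request struct are assumptions about the generated code, and the email and name are placeholders:

```rust
use rusoto_core::Region;
use rusoto_organizations::{CreateAccountRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = CreateAccountRequest {
        account_name: "example-member".to_string(),
        email: "owner@example.com".to_string(),
        // The optional fields control the IAM billing-access switch and the
        // name of the preconfigured OrganizationAccountAccessRole.
        ..Default::default()
    };
    match client.create_account(request).sync() {
        // CreateAccount is asynchronous: poll this status (or the CloudTrail
        // CreateAccountResult event) to learn when the account is usable.
        Ok(response) => println!("status: {:?}", response.create_account_status),
        Err(err) => eprintln!("create_account failed: {}", err),
    }
}
```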

fn create_account(
         &self,
         input: CreateAccountRequest,
@@ -4982,7 +4979,7 @@ pub trait Organizations {
         input: DescribePolicyRequest,
     ) -> RusotoFuture<DescribePolicyResponse, DescribePolicyError>;

-    ///

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify "Effect": "Deny" in the second SCP to override the "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

+ ///

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify "Effect": "Deny" in the second SCP to override the "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.
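The whitelisting order described in the note matters in code, too: attach the replacement SCP before detaching FullAWSAccess so the target never drops below one attached SCP. A sketch — the OU and replacement policy IDs are hypothetical, while `p-FullAWSAccess` is the fixed ID of the default SCP:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    AttachPolicyRequest, DetachPolicyRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let target = "ou-exampleouid".to_string(); // hypothetical OU ID

    // First attach the restrictive replacement policy...
    let attach = AttachPolicyRequest {
        policy_id: "p-restrictivescp".to_string(), // hypothetical SCP ID
        target_id: target.clone(),
    };
    client.attach_policy(attach).sync().expect("attach_policy failed");

    // ...then it is safe to detach the default FullAWSAccess policy.
    let detach = DetachPolicyRequest {
        policy_id: "p-FullAWSAccess".to_string(),
        target_id: target,
    };
    client.detach_policy(detach).sync().expect("detach_policy failed");
}
```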

fn detach_policy(&self, input: DetachPolicyRequest) -> RusotoFuture<(), DetachPolicyError>; ///

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We recommend that you disable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts unless the operations are explicitly permitted by the IAM policies that are attached to your roles.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.
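For illustration, a minimal sketch of turning off one integration; the service principal shown is just an example value, and the request shape matches the `DisableAWSServiceAccessRequest` named in the signature below:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    DisableAWSServiceAccessRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = DisableAWSServiceAccessRequest {
        // Identifies the AWS service whose integration should be disabled.
        service_principal: "config.amazonaws.com".to_string(),
    };
    match client.disable_aws_service_access(request).sync() {
        Ok(()) => println!("service access disabled"),
        Err(err) => eprintln!("disable_aws_service_access failed: {}", err),
    }
}
```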

@@ -4991,7 +4988,7 @@ pub trait Organizations {
         input: DisableAWSServiceAccessRequest,
     ) -> RusotoFuture<(), DisableAWSServiceAccessError>;

-    ///

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This operation can be called only from the organization's master account.

If you disable a policy type for a root, it still shows as enabled for the organization if all features are enabled in that organization. Use ListRoots to see the status of policy types for a specified root. Use DescribeOrganization to see the status of policy types in the organization.

+ ///

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

To view the status of available policy types in the organization, use DescribeOrganization.
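The recommended ListRoots-first workflow might look like this sketch — assuming the generated `ListRootsRequest`/`DisablePolicyTypeRequest` shapes; the root ID is a placeholder you would read out of the ListRoots response:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    DisablePolicyTypeRequest, ListRootsRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);

    // Check the per-root policy-type status first, as recommended above.
    let roots = client
        .list_roots(ListRootsRequest::default())
        .sync()
        .expect("list_roots failed");
    for root in roots.roots.unwrap_or_default() {
        println!("root {:?}: policy types {:?}", root.id, root.policy_types);
    }

    // Then disable the type in the chosen root.
    let request = DisablePolicyTypeRequest {
        root_id: "r-examplerootid".to_string(), // hypothetical; from ListRoots
        policy_type: "SERVICE_CONTROL_POLICY".to_string(),
    };
    client
        .disable_policy_type(request)
        .sync()
        .expect("disable_policy_type failed");
}
```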

fn disable_policy_type(
         &self,
         input: DisablePolicyTypeRequest,
@@ -5008,7 +5005,7 @@ pub trait Organizations {
         &self,
     ) -> RusotoFuture;

-    ///

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. Use DescribeOrganization to view the status of available policy types in the organization.

To view the status of policy types in a root, use ListRoots.

+ ///

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This is an asynchronous request that AWS performs in the background. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.
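The mirror-image call for enabling a type, as a hedged sketch under the same assumptions as the disable example above; the response is expected to carry the root with its updated policy-type list:

```rust
use rusoto_core::Region;
use rusoto_organizations::{EnablePolicyTypeRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = EnablePolicyTypeRequest {
        root_id: "r-examplerootid".to_string(), // hypothetical; see ListRoots
        policy_type: "SERVICE_CONTROL_POLICY".to_string(),
    };
    match client.enable_policy_type(request).sync() {
        Ok(response) => println!("root after enable: {:?}", response.root),
        Err(err) => eprintln!("enable_policy_type failed: {}", err),
    }
}
```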

fn enable_policy_type(
         &self,
         input: EnablePolicyTypeRequest,
@@ -5149,10 +5146,7 @@ impl OrganizationsClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> OrganizationsClient {
-        OrganizationsClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -5166,10 +5160,14 @@ impl OrganizationsClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        OrganizationsClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> OrganizationsClient {
+        OrganizationsClient { client, region }
     }
 }

@@ -5203,7 +5201,7 @@ impl Organizations for OrganizationsClient {
         })
     }

-    ///

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs

    • If you attach the policy directly to an account, it affects only that account

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child can't grant a permission that the paren't hasn't already granted. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They can't use A or B because the child OU filtered them out. They also can't use F and G because the parent OU filtered them out. They can't be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named "FullAWSAccess" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how AWS Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

+ ///

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, it affects only that account.

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). You can attach one SCP to a higher level root or OU, and a different SCP to a child OU or to an account. The child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child can't grant a permission that the parent hasn't already granted. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They can't use A or B because the child OU filtered them out. They also can't use F and G because the parent OU filtered them out. They can't be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named "FullAWSAccess" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how AWS Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

fn attach_policy(&self, input: AttachPolicyRequest) -> RusotoFuture<(), AttachPolicyError> {
         let mut request = SignedRequest::new("POST", "organizations", &self.region, "/");
@@ -5255,7 +5253,7 @@ impl Organizations for OrganizationsClient {
         })
     }

-    ///

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA), is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

+ ///

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA), is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

fn create_account(
         &self,
         input: CreateAccountRequest,
@@ -5685,7 +5683,7 @@ impl Organizations for OrganizationsClient {
         })
     }

-    ///

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify "Effect": "Deny" in the second SCP to override the "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

+ ///

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify "Effect": "Deny" in the second SCP to override the "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

fn detach_policy(&self, input: DetachPolicyRequest) -> RusotoFuture<(), DetachPolicyError> {
         let mut request = SignedRequest::new("POST", "organizations", &self.region, "/");
@@ -5734,7 +5732,7 @@ impl Organizations for OrganizationsClient {
         })
     }

-    ///

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This operation can be called only from the organization's master account.

If you disable a policy type for a root, it still shows as enabled for the organization if all features are enabled in that organization. Use ListRoots to see the status of policy types for a specified root. Use DescribeOrganization to see the status of policy types in the organization.

+ ///

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

To view the status of available policy types in the organization, use DescribeOrganization.
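A hedged usage sketch for the disable call; the root ID is a placeholder, and because the request is asynchronous the returned root may not yet reflect the final state:

```rust
use rusoto_core::Region;
use rusoto_organizations::{DisablePolicyTypeRequest, Organizations, OrganizationsClient};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    let request = DisablePolicyTypeRequest {
        policy_type: "SERVICE_CONTROL_POLICY".to_string(),
        root_id: "r-examplerootid11".to_string(), // placeholder; see ListRoots
    };
    // AWS completes the change in the background, so poll ListRoots to confirm.
    match client.disable_policy_type(request).sync() {
        Ok(response) => println!("Root after request: {:?}", response.root),
        Err(e) => eprintln!("DisablePolicyType failed: {}", e),
    }
}
```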

fn disable_policy_type( &self, input: DisablePolicyTypeRequest, @@ -5824,7 +5822,7 @@ impl Organizations for OrganizationsClient { }) } - ///

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. Use DescribeOrganization to view the status of available policy types in the organization.

To view the status of a policy type in a root, use ListRoots.

+ ///

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This is an asynchronous request that AWS performs in the background. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.
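Following that recommended flow, a sketch (assumed region, illustrative error handling) that lists roots and then enables SCPs on one of them:

```rust
use rusoto_core::Region;
use rusoto_organizations::{
    EnablePolicyTypeRequest, ListRootsRequest, Organizations, OrganizationsClient,
};

fn main() {
    let client = OrganizationsClient::new(Region::UsEast1);
    // Check the roots and their policy-type status first, as the docs advise.
    let roots = client
        .list_roots(ListRootsRequest::default())
        .sync()
        .expect("ListRoots failed");
    if let Some(root) = roots.roots.and_then(|mut r| r.pop()) {
        let request = EnablePolicyTypeRequest {
            policy_type: "SERVICE_CONTROL_POLICY".to_string(),
            root_id: root.id.unwrap_or_default(),
        };
        // The request is asynchronous; the returned root may lag the final state.
        let response = client.enable_policy_type(request).sync();
        println!("{:?}", response);
    }
}
```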

fn enable_policy_type( &self, input: EnablePolicyTypeRequest, diff --git a/rusoto/services/pi/Cargo.toml b/rusoto/services/pi/Cargo.toml index d9ba367dd6a..69820f04bbb 100644 --- a/rusoto/services/pi/Cargo.toml +++ b/rusoto/services/pi/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_pi" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/pi/README.md b/rusoto/services/pi/README.md index 1fd7905c27e..80ffbc45d8c 100644 --- a/rusoto/services/pi/README.md +++ b/rusoto/services/pi/README.md @@ -23,9 +23,16 @@ To use `rusoto_pi` in your application, add it as a dependency in your `Cargo.to ```toml [dependencies] -rusoto_pi = "0.40.0" +rusoto_pi = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/pi/src/custom/mod.rs b/rusoto/services/pi/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/pi/src/custom/mod.rs +++ b/rusoto/services/pi/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/pi/src/generated.rs b/rusoto/services/pi/src/generated.rs index aed8fbea044..e512df546ac 100644 --- a/rusoto/services/pi/src/generated.rs +++ b/rusoto/services/pi/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

A timestamp, and a single numerical value, which together represent a measurement at a particular point in time.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DataPoint { ///

The time, in epoch format, associated with a particular Value.

#[serde(rename = "Timestamp")] @@ -79,7 +78,7 @@ pub struct DescribeDimensionKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDimensionKeysResponse { ///

The end time for the returned dimension keys, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedEndTime will be greater than or equal to the value of the user-specified EndTime.

#[serde(rename = "AlignedEndTime")] @@ -121,7 +120,7 @@ pub struct DimensionGroup { ///

An array of descriptions and aggregated values for each dimension within a dimension group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DimensionKeyDescription { ///

A map of name-value pairs for the dimensions in the group.

#[serde(rename = "Dimensions")] @@ -169,7 +168,7 @@ pub struct GetResourceMetricsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourceMetricsResponse { ///

The end time for the returned metrics, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedEndTime will be greater than or equal to the value of the user-specified EndTime.

#[serde(rename = "AlignedEndTime")] @@ -195,7 +194,7 @@ pub struct GetResourceMetricsResponse { ///

A time-ordered series of data points, corresponding to a dimension of a Performance Insights metric.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricKeyDataPoints { ///

An array of timestamp-value pairs, representing measurements over a period of time.

#[serde(rename = "DataPoints")] @@ -225,7 +224,7 @@ pub struct MetricQuery { ///

If PartitionBy was specified in a DescribeDimensionKeys request, the dimensions are returned in an array. Each element in the array specifies one dimension.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResponsePartitionKey { ///

A dimension map that contains the dimension(s) for this partition.

#[serde(rename = "Dimensions")] @@ -234,7 +233,7 @@ pub struct ResponsePartitionKey { ///

An object describing a Performance Insights metric and one or more dimensions for that metric.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResponseResourceMetricKey { ///

The valid dimensions for the metric.

#[serde(rename = "Dimensions")] @@ -367,10 +366,7 @@ impl PerformanceInsightsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> PerformanceInsightsClient { - PerformanceInsightsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -384,10 +380,14 @@ impl PerformanceInsightsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - PerformanceInsightsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> PerformanceInsightsClient { + PerformanceInsightsClient { client, region } } } diff --git a/rusoto/services/polly/Cargo.toml b/rusoto/services/polly/Cargo.toml index 031dc4be180..48665ce0015 100644 --- a/rusoto/services/polly/Cargo.toml +++ b/rusoto/services/polly/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_polly" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/polly/README.md b/rusoto/services/polly/README.md index b65884a9944..6b23209e9c1 100644 --- a/rusoto/services/polly/README.md +++ b/rusoto/services/polly/README.md @@ -23,9 +23,16 @@ To use `rusoto_polly` in your application, add it as a dependency in your `Cargo ```toml [dependencies] -rusoto_polly = "0.40.0" +rusoto_polly = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/polly/src/custom/mod.rs b/rusoto/services/polly/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/polly/src/custom/mod.rs +++ b/rusoto/services/polly/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/polly/src/generated.rs b/rusoto/services/polly/src/generated.rs index ff21270fb38..fcbe134801e 100644 --- a/rusoto/services/polly/src/generated.rs +++ b/rusoto/services/polly/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -33,11 +32,15 @@ pub struct DeleteLexiconInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLexiconOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeVoicesInput { + ///

Specifies the engine (standard or neural) used by Amazon Polly when processing input text for speech synthesis.

+ #[serde(rename = "Engine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub engine: Option, ///

Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (en-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no.

#[serde(rename = "IncludeAdditionalLanguageCodes")] #[serde(skip_serializing_if = "Option::is_none")] @@ -53,7 +56,7 @@ pub struct DescribeVoicesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeVoicesOutput { ///

The pagination token to use in the next request to continue the listing of voices. NextToken is returned only if the response is truncated.

#[serde(rename = "NextToken")] @@ -73,7 +76,7 @@ pub struct GetLexiconInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLexiconOutput { ///

Lexicon object that provides name and the string content of the lexicon.

#[serde(rename = "Lexicon")] @@ -93,7 +96,7 @@ pub struct GetSpeechSynthesisTaskInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSpeechSynthesisTaskOutput { ///

SynthesisTask object that provides information from the requested task, including output format, creation time, task status, and so on.

#[serde(rename = "SynthesisTask")] @@ -103,7 +106,7 @@ pub struct GetSpeechSynthesisTaskOutput { ///

Provides lexicon name and lexicon content in string format. For more information, see Pronunciation Lexicon Specification (PLS) Version 1.0.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Lexicon { ///

Lexicon content in string format. The content of a lexicon must be in PLS format.

#[serde(rename = "Content")] @@ -115,9 +118,9 @@ pub struct Lexicon { pub name: Option, } -///

Contains metadata describing the lexicon such as the number of lexemes, language code, and so on. For more information, see Managing Lexicons.

+///

Contains metadata describing the lexicon such as the number of lexemes, language code, and so on. For more information, see Managing Lexicons.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LexiconAttributes { ///

Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa.

#[serde(rename = "Alphabet")] @@ -147,7 +150,7 @@ pub struct LexiconAttributes { ///

Describes the content of the lexicon.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LexiconDescription { ///

Provides lexicon metadata.

#[serde(rename = "Attributes")] @@ -168,7 +171,7 @@ pub struct ListLexiconsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLexiconsOutput { ///

A list of lexicon names and attributes.

#[serde(rename = "Lexicons")] @@ -197,7 +200,7 @@ pub struct ListSpeechSynthesisTasksInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSpeechSynthesisTasksOutput { ///

An opaque pagination token returned from the previous List operation in this request. If present, this indicates where to continue the listing.

#[serde(rename = "NextToken")] @@ -220,11 +223,15 @@ pub struct PutLexiconInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutLexiconOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StartSpeechSynthesisTaskInput { + ///

Specifies the engine (standard or neural) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.

+ #[serde(rename = "Engine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub engine: Option, ///

Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

#[serde(rename = "LanguageCode")] #[serde(skip_serializing_if = "Option::is_none")] @@ -243,7 +250,7 @@ pub struct StartSpeechSynthesisTaskInput { #[serde(rename = "OutputS3KeyPrefix")] #[serde(skip_serializing_if = "Option::is_none")] pub output_s3_key_prefix: Option, - ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050".

Valid values for pcm are "8000" and "16000". The default value is "16000".

+ ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".

Valid values for pcm are "8000" and "16000". The default value is "16000".

#[serde(rename = "SampleRate")] #[serde(skip_serializing_if = "Option::is_none")] pub sample_rate: Option, @@ -268,7 +275,7 @@ pub struct StartSpeechSynthesisTaskInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSpeechSynthesisTaskOutput { ///

SynthesisTask object that provides information and attributes about a newly submitted speech synthesis task.

#[serde(rename = "SynthesisTask")] @@ -278,12 +285,16 @@ pub struct StartSpeechSynthesisTaskOutput { ///

SynthesisTask object that provides information about a speech synthesis task.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SynthesisTask { ///

Timestamp for the time the synthesis task was started.

#[serde(rename = "CreationTime")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_time: Option, + ///

Specifies the engine (standard or neural) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.

+ #[serde(rename = "Engine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub engine: Option, ///

Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

#[serde(rename = "LanguageCode")] #[serde(skip_serializing_if = "Option::is_none")] @@ -304,7 +315,7 @@ pub struct SynthesisTask { #[serde(rename = "RequestCharacters")] #[serde(skip_serializing_if = "Option::is_none")] pub request_characters: Option, - ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050".

Valid values for pcm are "8000" and "16000". The default value is "16000".

+ ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".

Valid values for pcm are "8000" and "16000". The default value is "16000".

#[serde(rename = "SampleRate")] #[serde(skip_serializing_if = "Option::is_none")] pub sample_rate: Option, @@ -340,18 +351,22 @@ pub struct SynthesisTask { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SynthesizeSpeechInput { + ///

Specifies the engine (standard or neural) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.

+ #[serde(rename = "Engine")] + #[serde(skip_serializing_if = "Option::is_none")] + pub engine: Option, ///

Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

#[serde(rename = "LanguageCode")] #[serde(skip_serializing_if = "Option::is_none")] pub language_code: Option, - ///

List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice. For information about storing lexicons, see PutLexicon.

+ ///

List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice. For information about storing lexicons, see PutLexicon.

#[serde(rename = "LexiconNames")] #[serde(skip_serializing_if = "Option::is_none")] pub lexicon_names: Option>, ///

The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.

When pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.

#[serde(rename = "OutputFormat")] pub output_format: String, - ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". The default value is "22050".

Valid values for pcm are "8000" and "16000". The default value is "16000".

+ ///

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".

Valid values for pcm are "8000" and "16000". The default value is "16000".

#[serde(rename = "SampleRate")] #[serde(skip_serializing_if = "Option::is_none")] pub sample_rate: Option, @@ -362,11 +377,11 @@ pub struct SynthesizeSpeechInput { ///

Input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.

#[serde(rename = "Text")] pub text: String, - ///

Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML.

+ ///

Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML.

#[serde(rename = "TextType")] #[serde(skip_serializing_if = "Option::is_none")] pub text_type: Option, - ///

Voice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.

+ ///

Voice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.

#[serde(rename = "VoiceId")] pub voice_id: String, } @@ -383,7 +398,7 @@ pub struct SynthesizeSpeechOutput { ///

Description of the voice.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Voice { ///

Additional codes for languages available for the specified voice in addition to its default language.

For example, the default language for Aditi is Indian English (en-IN) because it was first used for that language. Since Aditi is bilingual and fluent in both Indian English and Hindi, this parameter would show the code hi-IN.

#[serde(rename = "AdditionalLanguageCodes")] @@ -409,6 +424,10 @@ pub struct Voice { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, + ///

Specifies which engines (standard or neural) are supported by a given voice.

+ #[serde(rename = "SupportedEngines")] + #[serde(skip_serializing_if = "Option::is_none")] + pub supported_engines: Option>, } /// Errors returned by DeleteLexicon @@ -676,7 +695,7 @@ pub enum PutLexiconError { ServiceFailure(String), ///

The alphabet specified by the lexicon is not a supported alphabet. Valid values are x-sampa and ipa.

UnsupportedPlsAlphabet(String), - ///

The language specified in the lexicon is unsupported. For a list of supported languages, see Lexicon Attributes.

+ ///

The language specified in the lexicon is unsupported. For a list of supported languages, see Lexicon Attributes.

UnsupportedPlsLanguage(String), } @@ -735,6 +754,8 @@ impl Error for PutLexiconError { /// Errors returned by StartSpeechSynthesisTask #[derive(Debug, PartialEq)] pub enum StartSpeechSynthesisTaskError { + ///

This engine is not compatible with the voice that you have designated. Choose a new voice that is compatible with the engine or change the engine and restart the operation.

+ EngineNotSupported(String), ///

The provided Amazon S3 bucket name is invalid. Please check your input against S3 bucket naming requirements and try again.

InvalidS3Bucket(String), ///

The provided Amazon S3 key prefix is invalid. Please provide a valid S3 object key name.

@@ -763,6 +784,11 @@ impl StartSpeechSynthesisTaskError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { + "EngineNotSupportedException" => { + return RusotoError::Service(StartSpeechSynthesisTaskError::EngineNotSupported( + err.msg, + )) + } "InvalidS3BucketException" => { return RusotoError::Service(StartSpeechSynthesisTaskError::InvalidS3Bucket( err.msg, @@ -833,6 +859,7 @@ impl fmt::Display for StartSpeechSynthesisTaskError { impl Error for StartSpeechSynthesisTaskError { fn description(&self) -> &str { match *self { + StartSpeechSynthesisTaskError::EngineNotSupported(ref cause) => cause, StartSpeechSynthesisTaskError::InvalidS3Bucket(ref cause) => cause, StartSpeechSynthesisTaskError::InvalidS3Key(ref cause) => cause, StartSpeechSynthesisTaskError::InvalidSampleRate(ref cause) => cause, @@ -850,6 +877,8 @@ impl Error for StartSpeechSynthesisTaskError { /// Errors returned by SynthesizeSpeech #[derive(Debug, PartialEq)] pub enum SynthesizeSpeechError { + ///

This engine is not compatible with the voice that you have designated. Choose a new voice that is compatible with the engine or change the engine and restart the operation.

+ EngineNotSupported(String), ///

The specified sample rate is not valid.

InvalidSampleRate(String), ///

The SSML you provided is invalid. Verify the SSML syntax, spelling of tags and values, and then try again.

@@ -872,6 +901,9 @@ impl SynthesizeSpeechError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse_rest(&res) { match err.typ.as_str() { + "EngineNotSupportedException" => { + return RusotoError::Service(SynthesizeSpeechError::EngineNotSupported(err.msg)) + } "InvalidSampleRateException" => { return RusotoError::Service(SynthesizeSpeechError::InvalidSampleRate(err.msg)) } @@ -917,6 +949,7 @@ impl fmt::Display for SynthesizeSpeechError { impl Error for SynthesizeSpeechError { fn description(&self) -> &str { match *self { + SynthesizeSpeechError::EngineNotSupported(ref cause) => cause, SynthesizeSpeechError::InvalidSampleRate(ref cause) => cause, SynthesizeSpeechError::InvalidSsml(ref cause) => cause, SynthesizeSpeechError::LanguageNotSupported(ref cause) => cause, @@ -930,7 +963,7 @@ impl Error for SynthesizeSpeechError { } /// Trait representing the capabilities of the Amazon Polly API. Amazon Polly clients implement this trait. pub trait Polly { - ///

Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon that has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicons APIs.

For more information, see Managing Lexicons.

+ ///

Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon that has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicons APIs.

For more information, see Managing Lexicons.

fn delete_lexicon( &self, input: DeleteLexiconInput, @@ -942,7 +975,7 @@ pub trait Polly { input: DescribeVoicesInput, ) -> RusotoFuture; - ///

Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.

+ ///

Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.

fn get_lexicon( &self, input: GetLexiconInput, @@ -954,7 +987,7 @@ pub trait Polly { input: GetSpeechSynthesisTaskInput, ) -> RusotoFuture; - ///

Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.

+ ///

Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.

fn list_lexicons( &self, input: ListLexiconsInput, @@ -966,7 +999,7 @@ pub trait Polly { input: ListSpeechSynthesisTasksInput, ) -> RusotoFuture; - ///

Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.

+ ///

Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.
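As a sketch under assumed values (the lexicon name and PLS body are made up), storing a lexicon looks like this; keep the eventual-consistency caveat above in mind:

```rust
use rusoto_core::Region;
use rusoto_polly::{Polly, PollyClient, PutLexiconInput};

fn main() {
    let client = PollyClient::new(Region::UsEast1);
    // A minimal PLS 1.0 document; real lexicons follow the W3C PLS schema.
    let pls = r#"<?xml version="1.0" encoding="UTF-8"?>
<lexicon version="1.0"
         xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
         alphabet="ipa" xml:lang="en-US">
  <lexeme><grapheme>W3C</grapheme><alias>World Wide Web Consortium</alias></lexeme>
</lexicon>"#;
    let input = PutLexiconInput {
        name: "demoLexicon".to_string(), // hypothetical lexicon name
        content: pls.to_string(),
    };
    // Storage is eventually consistent: SynthesizeSpeech may not see it immediately.
    client.put_lexicon(input).sync().expect("PutLexicon failed");
}
```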

fn put_lexicon( &self, input: PutLexiconInput, @@ -978,7 +1011,7 @@ pub trait Polly { input: StartSpeechSynthesisTaskInput, ) -> RusotoFuture; - ///

Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.

+ ///

Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.
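A usage sketch with illustrative values (voice, engine, and output file name are assumptions); it buffers the synthesized audio from the response and writes it to disk:

```rust
use rusoto_core::Region;
use rusoto_polly::{Polly, PollyClient, SynthesizeSpeechInput};
use std::fs::File;
use std::io::Write;

fn main() {
    let client = PollyClient::new(Region::UsEast1);
    let input = SynthesizeSpeechInput {
        engine: Some("standard".to_string()), // "neural" only for voices that support it
        output_format: "mp3".to_string(),
        text: "Hello from Rusoto.".to_string(),
        voice_id: "Joanna".to_string(),
        ..Default::default()
    };
    let output = client
        .synthesize_speech(input)
        .sync()
        .expect("SynthesizeSpeech failed");
    // The generated output struct carries the buffered audio bytes.
    if let Some(audio) = output.audio_stream {
        File::create("hello.mp3")
            .and_then(|mut f| f.write_all(&audio))
            .expect("failed to write audio");
    }
}
```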

fn synthesize_speech( &self, input: SynthesizeSpeechInput, @@ -996,10 +1029,7 @@ impl PollyClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> PollyClient { - PollyClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1013,15 +1043,19 @@ impl PollyClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - PollyClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> PollyClient { + PollyClient { client, region } } } impl Polly for PollyClient { - ///

Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon that has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicons APIs.

For more information, see Managing Lexicons.

+ ///

Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon that has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicons APIs.

For more information, see Managing Lexicons.

fn delete_lexicon( &self, input: DeleteLexiconInput, @@ -1061,6 +1095,9 @@ impl Polly for PollyClient { request.set_content_type("application/x-amz-json-1.1".to_owned()); let mut params = Params::new(); + if let Some(ref x) = input.engine { + params.put("Engine", x); + } if let Some(ref x) = input.include_additional_language_codes { params.put("IncludeAdditionalLanguageCodes", x); } @@ -1091,7 +1128,7 @@ impl Polly for PollyClient { }) } - ///

Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.

+ ///

Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.

fn get_lexicon( &self, input: GetLexiconInput, @@ -1148,7 +1185,7 @@ impl Polly for PollyClient { }) } - ///

Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.

+ ///

Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.

fn list_lexicons( &self, input: ListLexiconsInput, @@ -1221,7 +1258,7 @@ impl Polly for PollyClient { }) } - ///

Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.

+ ///

Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.

fn put_lexicon( &self, input: PutLexiconInput, @@ -1282,7 +1319,7 @@ impl Polly for PollyClient { }) } - ///

Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.

+ ///

Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.

fn synthesize_speech( &self, input: SynthesizeSpeechInput, diff --git a/rusoto/services/pricing/Cargo.toml b/rusoto/services/pricing/Cargo.toml index d903004a82a..bb4a8224fb4 100644 --- a/rusoto/services/pricing/Cargo.toml +++ b/rusoto/services/pricing/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_pricing" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/pricing/README.md b/rusoto/services/pricing/README.md index bd9a92dd602..e376e22162c 100644 --- a/rusoto/services/pricing/README.md +++ b/rusoto/services/pricing/README.md @@ -23,9 +23,16 @@ To use `rusoto_pricing` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_pricing = "0.40.0" +rusoto_pricing = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/pricing/src/custom/mod.rs b/rusoto/services/pricing/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/pricing/src/custom/mod.rs +++ b/rusoto/services/pricing/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/pricing/src/generated.rs b/rusoto/services/pricing/src/generated.rs index 7438116d5e4..e3635f19289 100644 --- a/rusoto/services/pricing/src/generated.rs +++ b/rusoto/services/pricing/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

The values of a given attribute, such as Throughput Optimized HDD or Provisioned IOPS for the Amazon EC2 volumeType attribute.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttributeValue { ///

The specific value of an attributeName.

#[serde(rename = "Value")] @@ -55,7 +54,7 @@ pub struct DescribeServicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeServicesResponse { ///

The format version of the response. For example, aws_v1.

#[serde(rename = "FormatVersion")] @@ -104,7 +103,7 @@ pub struct GetAttributeValuesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAttributeValuesResponse { ///

The list of values for an attribute. For example, Throughput Optimized HDD and Provisioned IOPS are two available values for the AmazonEC2 volumeType.

#[serde(rename = "AttributeValues")] @@ -141,7 +140,7 @@ pub struct GetProductsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetProductsResponse { ///

The format version of the response. For example, aws_v1.

#[serde(rename = "FormatVersion")] @@ -159,7 +158,7 @@ pub struct GetProductsResponse { ///

The metadata for a service, such as the service code and available attribute names.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Service { ///

The attributes that are available for this service.

#[serde(rename = "AttributeNames")] @@ -374,10 +373,7 @@ impl PricingClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> PricingClient { - PricingClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -391,10 +387,14 @@ impl PricingClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - PricingClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> PricingClient { + PricingClient { client, region } } } diff --git a/rusoto/services/qldb-session/Cargo.toml b/rusoto/services/qldb-session/Cargo.toml new file mode 100644 index 00000000000..a5b1101aeef --- /dev/null +++ b/rusoto/services/qldb-session/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - Amazon QLDB Session @ 2019-07-11" +documentation = "https://docs.rs/rusoto_qldb_session" +keywords = ["AWS", "Amazon", "qldb-session"] +license = "MIT" +name = "rusoto_qldb_session" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/qldb-session/README.md b/rusoto/services/qldb-session/README.md new file mode 100644 index 00000000000..4b7a5d8dddc --- /dev/null +++ b/rusoto/services/qldb-session/README.md @@ -0,0 +1,52 @@ + +# Rusoto QldbSession +Rust SDK for Amazon QLDB Session + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_qldb_session` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_qldb_session = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. + +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. 
+ +[api-documentation]: https://docs.rs/rusoto_qldb_session "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/qldb-session/src/custom/mod.rs b/rusoto/services/qldb-session/src/custom/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rusoto/services/qldb-session/src/generated.rs b/rusoto/services/qldb-session/src/generated.rs new file mode 100644 index 00000000000..6fe5179aa35 --- /dev/null +++ b/rusoto/services/qldb-session/src/generated.rs @@ -0,0 +1,389 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +///

Contains the details of the transaction to abort.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct AbortTransactionRequest {} + +///

Contains the details of the aborted transaction.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct AbortTransactionResult {} + +///

Contains the details of the transaction to commit.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CommitTransactionRequest { + ///

Specifies the commit digest for the transaction to commit. For every active transaction, the commit digest must be passed. QLDB validates CommitDigest and rejects the commit with an error if the digest computed on the client does not match the digest computed by QLDB.

+ #[serde(rename = "CommitDigest")] + #[serde( + deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", + serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", + default + )] + pub commit_digest: bytes::Bytes, + ///

Specifies the transaction id of the transaction to commit.

+ #[serde(rename = "TransactionId")] + pub transaction_id: String, +} + +///

Contains the details of the committed transaction.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CommitTransactionResult { + ///

The commit digest of the committed transaction.

+ #[serde(rename = "CommitDigest")] + #[serde( + deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", + serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", + default + )] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_digest: Option, + ///

The transaction id of the committed transaction.

+ #[serde(rename = "TransactionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transaction_id: Option, +} + +///

Specifies a request to end the session.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct EndSessionRequest {} + +///

Contains the details of the ended session.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct EndSessionResult {} + +///

Specifies a request to execute a statement.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ExecuteStatementRequest { + ///

Specifies the parameters for the parameterized statement in the request.

+ #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option>, + ///

Specifies the statement of the request.

+ #[serde(rename = "Statement")] + pub statement: String, + ///

Specifies the transaction id of the request.

+ #[serde(rename = "TransactionId")] + pub transaction_id: String, +} + +///

Contains the details of the executed statement.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ExecuteStatementResult { + ///

Contains the details of the first fetched page.

+ #[serde(rename = "FirstPage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub first_page: Option, +} + +///

Specifies the details of the page to be fetched.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct FetchPageRequest { + ///

Specifies the next page token of the page to be fetched.

+ #[serde(rename = "NextPageToken")] + pub next_page_token: String, + ///

Specifies the transaction id of the page to be fetched.

+ #[serde(rename = "TransactionId")] + pub transaction_id: String, +} + +///

Contains the page that was fetched.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct FetchPageResult { + ///

Contains details of the fetched page.

+ #[serde(rename = "Page")] + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, +} + +///

Contains details of the fetched page.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Page { + ///

The token of the next page.

+ #[serde(rename = "NextPageToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_page_token: Option, + ///

A structure that contains values in multiple encoding formats.

+ #[serde(rename = "Values")] + #[serde(skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct SendCommandRequest { + ///

Command to abort the current transaction.

+ #[serde(rename = "AbortTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub abort_transaction: Option, + ///

Command to commit the specified transaction.

+ #[serde(rename = "CommitTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_transaction: Option, + ///

Command to end the current session.

+ #[serde(rename = "EndSession")] + #[serde(skip_serializing_if = "Option::is_none")] + pub end_session: Option, + ///

Command to execute a statement in the specified transaction.

+ #[serde(rename = "ExecuteStatement")] + #[serde(skip_serializing_if = "Option::is_none")] + pub execute_statement: Option, + ///

Command to fetch a page.

+ #[serde(rename = "FetchPage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub fetch_page: Option, + ///

Specifies the session token for the current command. A session token is constant throughout the life of the session.

To obtain a session token, run the StartSession command. This SessionToken is required for every subsequent command that is issued during the current session.

+ #[serde(rename = "SessionToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_token: Option, + ///

Command to start a new session. A session token is obtained as part of the response.

+ #[serde(rename = "StartSession")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_session: Option, + ///

Command to start a new transaction.

+ #[serde(rename = "StartTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_transaction: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct SendCommandResult { + ///

Contains the details of the aborted transaction.

+ #[serde(rename = "AbortTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub abort_transaction: Option, + ///

Contains the details of the committed transaction.

+ #[serde(rename = "CommitTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_transaction: Option, + ///

Contains the details of the ended session.

+ #[serde(rename = "EndSession")] + #[serde(skip_serializing_if = "Option::is_none")] + pub end_session: Option, + ///

Contains the details of the executed statement.

+ #[serde(rename = "ExecuteStatement")] + #[serde(skip_serializing_if = "Option::is_none")] + pub execute_statement: Option, + ///

Contains the details of the fetched page.

+ #[serde(rename = "FetchPage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub fetch_page: Option, + ///

Contains the details of the started session that includes a session token. This SessionToken is required for every subsequent command that is issued during the current session.

+ #[serde(rename = "StartSession")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_session: Option, + ///

Contains the details of the started transaction.

+ #[serde(rename = "StartTransaction")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_transaction: Option, +} + +///

Specifies a request to start a new session.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartSessionRequest { + ///

The name of the ledger to start a new session against.

+ #[serde(rename = "LedgerName")] + pub ledger_name: String, +} + +///

Contains the details of the started session.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartSessionResult { + ///

Session token of the started session. This SessionToken is required for every subsequent command that is issued during the current session.

+ #[serde(rename = "SessionToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_token: Option, +} + +///

Specifies a request to start a transaction.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartTransactionRequest {} + +///

Contains the details of the started transaction.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct StartTransactionResult { + ///

The transaction id of the started transaction.

+ #[serde(rename = "TransactionId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub transaction_id: Option, +} + +///

A structure that can contain values in multiple encoding formats.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValueHolder { + ///

An Amazon Ion binary value contained in a ValueHolder structure.

+ #[serde(rename = "IonBinary")] + #[serde( + deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", + serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", + default + )] + #[serde(skip_serializing_if = "Option::is_none")] + pub ion_binary: Option, + ///

An Amazon Ion plaintext value contained in a ValueHolder structure.

+ #[serde(rename = "IonText")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ion_text: Option, +} + +/// Errors returned by SendCommand +#[derive(Debug, PartialEq)] +pub enum SendCommandError { + ///

Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

+ BadRequest(String), + ///

Returned if the session doesn't exist anymore because it timed out or expired.

+ InvalidSession(String), + ///

Returned if a resource limit, such as the number of active sessions, is exceeded.

+ LimitExceeded(String), + ///

Returned when a transaction cannot be written to the journal due to a failure in the verification phase of Optimistic Concurrency Control.

+ OccConflict(String), + ///

Returned when the rate of requests exceeds the allowed throughput.

+ RateExceeded(String), +} + +impl SendCommandError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "BadRequestException" => { + return RusotoError::Service(SendCommandError::BadRequest(err.msg)) + } + "InvalidSessionException" => { + return RusotoError::Service(SendCommandError::InvalidSession(err.msg)) + } + "LimitExceededException" => { + return RusotoError::Service(SendCommandError::LimitExceeded(err.msg)) + } + "OccConflictException" => { + return RusotoError::Service(SendCommandError::OccConflict(err.msg)) + } + "RateExceededException" => { + return RusotoError::Service(SendCommandError::RateExceeded(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for SendCommandError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for SendCommandError { + fn description(&self) -> &str { + match *self { + SendCommandError::BadRequest(ref cause) => cause, + SendCommandError::InvalidSession(ref cause) => cause, + SendCommandError::LimitExceeded(ref cause) => cause, + SendCommandError::OccConflict(ref cause) => cause, + SendCommandError::RateExceeded(ref cause) => cause, + } + } +} +/// Trait representing the capabilities of the QLDB Session API. QLDB Session clients implement this trait. +pub trait QldbSession { + ///

Sends a command to an Amazon QLDB ledger.

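For orientation, here is a minimal sketch of driving this trait: it starts a session over SendCommand and matches the error variants defined above. The ledger name is a placeholder, and the `StartSessionRequest`/`StartSessionResult` shapes are assumed from this crate's other generated types rather than shown in this excerpt:

```rust
use rusoto_core::{Region, RusotoError};
use rusoto_qldb_session::{
    QldbSession, QldbSessionClient, SendCommandError, SendCommandRequest, StartSessionRequest,
};

fn main() {
    let client = QldbSessionClient::new(Region::UsEast1);
    // Start a session against a hypothetical ledger named "example-ledger".
    let request = SendCommandRequest {
        start_session: Some(StartSessionRequest {
            ledger_name: "example-ledger".to_string(),
        }),
        ..Default::default()
    };
    // RusotoFuture can be driven synchronously with `.sync()`.
    match client.send_command(request).sync() {
        Ok(result) => {
            let token = result.start_session.and_then(|s| s.session_token);
            println!("session token: {:?}", token);
        }
        // Service-specific failures arrive wrapped in RusotoError::Service.
        Err(RusotoError::Service(SendCommandError::InvalidSession(msg))) => {
            eprintln!("session expired or timed out: {}", msg)
        }
        Err(RusotoError::Service(SendCommandError::OccConflict(msg))) => {
            eprintln!("OCC conflict; retry the transaction: {}", msg)
        }
        Err(other) => eprintln!("SendCommand failed: {}", other),
    }
}
```

Calls can also be composed as futures; `.sync()` is just the simplest way to exercise the client.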
+    fn send_command(
+        &self,
+        input: SendCommandRequest,
+    ) -> RusotoFuture<SendCommandResult, SendCommandError>;
+}
+/// A client for the QLDB Session API.
+#[derive(Clone)]
+pub struct QldbSessionClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl QldbSessionClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> QldbSessionClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> QldbSessionClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
+            region,
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> QldbSessionClient {
+        QldbSessionClient { client, region }
+    }
+}
+
+impl QldbSession for QldbSessionClient {
+    ///

Sends a command to an Amazon QLDB ledger.

+    fn send_command(
+        &self,
+        input: SendCommandRequest,
+    ) -> RusotoFuture<SendCommandResult, SendCommandError> {
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, "/");
+        request.set_endpoint_prefix("session.qldb".to_string());
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header("x-amz-target", "QLDBSession.SendCommand");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<SendCommandResult, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(SendCommandError::from_response(response))),
+                )
+            }
+        })
+    }
+}
diff --git a/rusoto/services/qldb-session/src/lib.rs b/rusoto/services/qldb-session/src/lib.rs
new file mode 100644
index 00000000000..0ed23d06d6a
--- /dev/null
+++ b/rusoto/services/qldb-session/src/lib.rs
@@ -0,0 +1,32 @@
+
+// =================================================================
+//
+//  * WARNING *
+//
+//  This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//!

The transactional data APIs for Amazon QLDB

+//! +//! If you're using the service, you're probably looking for [QldbSessionClient](struct.QldbSessionClient.html) and [QldbSession](trait.QldbSession.html). + +extern crate bytes; +extern crate futures; +extern crate rusoto_core; +extern crate serde; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; + +mod generated; +mod custom; + +pub use crate::generated::*; +pub use crate::custom::*; + diff --git a/rusoto/services/qldb/Cargo.toml b/rusoto/services/qldb/Cargo.toml new file mode 100644 index 00000000000..4ee1d71b0c2 --- /dev/null +++ b/rusoto/services/qldb/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - Amazon QLDB @ 2019-01-02" +documentation = "https://docs.rs/rusoto_qldb" +keywords = ["AWS", "Amazon", "qldb"] +license = "MIT" +name = "rusoto_qldb" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/qldb/README.md b/rusoto/services/qldb/README.md new file mode 100644 index 00000000000..f961f282846 --- /dev/null +++ b/rusoto/services/qldb/README.md @@ -0,0 +1,52 @@ + +# Rusoto Qldb +Rust SDK for Amazon QLDB + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_qldb` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_qldb = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. + +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. 
+ +[api-documentation]: https://docs.rs/rusoto_qldb "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/qldb/src/custom/mod.rs b/rusoto/services/qldb/src/custom/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rusoto/services/qldb/src/generated.rs b/rusoto/services/qldb/src/generated.rs new file mode 100644 index 00000000000..64f462949b5 --- /dev/null +++ b/rusoto/services/qldb/src/generated.rs @@ -0,0 +1,1666 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::param::{Params, ServiceParams}; +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateLedgerRequest { + ///

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+ #[serde(rename = "DeletionProtection")] + #[serde(skip_serializing_if = "Option::is_none")] + pub deletion_protection: Option, + ///

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

+ #[serde(rename = "Name")] + pub name: String, + ///

The permissions mode to assign to the ledger that you want to create.

+ #[serde(rename = "PermissionsMode")] + pub permissions_mode: String, + ///

The key-value pairs to add as tags to the ledger that you want to create. Tag keys are case sensitive. Tag values are case sensitive and can be null.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateLedgerResponse { + ///

The Amazon Resource Name (ARN) for the ledger.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

+ #[serde(rename = "CreationDateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_date_time: Option, + ///

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+ #[serde(rename = "DeletionProtection")] + #[serde(skip_serializing_if = "Option::is_none")] + pub deletion_protection: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The current status of the ledger.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteLedgerRequest { + ///

The name of the ledger that you want to delete.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeJournalS3ExportRequest { + ///

The unique ID of the journal export job that you want to describe.

+ #[serde(rename = "ExportId")] + pub export_id: String, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeJournalS3ExportResponse { + ///

Information about the journal export job returned by a DescribeJournalS3Export request.

+ #[serde(rename = "ExportDescription")] + pub export_description: JournalS3ExportDescription, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeLedgerRequest { + ///

The name of the ledger that you want to describe.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeLedgerResponse { + ///

The Amazon Resource Name (ARN) for the ledger.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

+ #[serde(rename = "CreationDateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_date_time: Option, + ///

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+ #[serde(rename = "DeletionProtection")] + #[serde(skip_serializing_if = "Option::is_none")] + pub deletion_protection: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The current status of the ledger.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ExportJournalToS3Request { + ///

The exclusive end date and time for the range of journal contents that you want to export.

The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

The ExclusiveEndTime must be less than or equal to the current UTC date and time.

+ #[serde(rename = "ExclusiveEndTime")] + pub exclusive_end_time: f64, + ///

The inclusive start date and time for the range of journal contents that you want to export.

The InclusiveStartTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

The InclusiveStartTime must be before ExclusiveEndTime.

If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, Amazon QLDB defaults it to the ledger's CreationDateTime.

+ #[serde(rename = "InclusiveStartTime")] + pub inclusive_start_time: f64, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, + ///

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

+ #[serde(rename = "RoleArn")] + pub role_arn: String, + ///

The configuration settings of the Amazon S3 bucket destination for your export request.

+ #[serde(rename = "S3ExportConfiguration")] + pub s3_export_configuration: S3ExportConfiguration, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ExportJournalToS3Response { + ///

The unique ID that QLDB assigns to each journal export job.

To describe your export request and check the status of the job, you can use ExportId to call DescribeJournalS3Export.

+ #[serde(rename = "ExportId")] + pub export_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetBlockRequest { + ///

The location of the block that you want to request. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:14}

+ #[serde(rename = "BlockAddress")] + pub block_address: ValueHolder, + ///

The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:49}

+ #[serde(rename = "DigestTipAddress")] + #[serde(skip_serializing_if = "Option::is_none")] + pub digest_tip_address: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetBlockResponse { + ///

The block data object in Amazon Ion format.

+ #[serde(rename = "Block")] + pub block: ValueHolder, + ///

The proof object in Amazon Ion format returned by a GetBlock request. A proof contains the list of hash values required to recalculate the specified digest using a Merkle tree, starting with the specified block.

+ #[serde(rename = "Proof")] + #[serde(skip_serializing_if = "Option::is_none")] + pub proof: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetDigestRequest { + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetDigestResponse { + ///

The 256-bit hash value representing the digest returned by a GetDigest request.

+ #[serde(rename = "Digest")] + #[serde( + deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob", + serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob", + default + )] + pub digest: bytes::Bytes, + ///

The latest block location covered by the digest that you requested. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

+ #[serde(rename = "DigestTipAddress")] + pub digest_tip_address: ValueHolder, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetRevisionRequest { + ///

The block location of the document revision to be verified. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:14}

+ #[serde(rename = "BlockAddress")] + pub block_address: ValueHolder, + ///

The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:49}

+ #[serde(rename = "DigestTipAddress")] + #[serde(skip_serializing_if = "Option::is_none")] + pub digest_tip_address: Option, + ///

The unique ID of the document to be verified.

+ #[serde(rename = "DocumentId")] + pub document_id: String, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetRevisionResponse { + ///

The proof object in Amazon Ion format returned by a GetRevision request. A proof contains the list of hash values that are required to recalculate the specified digest using a Merkle tree, starting with the specified document revision.

+ #[serde(rename = "Proof")] + #[serde(skip_serializing_if = "Option::is_none")] + pub proof: Option, + ///

The document revision data object in Amazon Ion format.

+ #[serde(rename = "Revision")] + pub revision: ValueHolder, +} + +///

The information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct JournalS3ExportDescription { + ///

The exclusive end date and time for the range of journal contents that are specified in the original export request.

+ #[serde(rename = "ExclusiveEndTime")] + pub exclusive_end_time: f64, + ///

The date and time, in epoch time format, when the export job was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

+ #[serde(rename = "ExportCreationTime")] + pub export_creation_time: f64, + ///

The unique ID of the journal export job.

+ #[serde(rename = "ExportId")] + pub export_id: String, + ///

The inclusive start date and time for the range of journal contents that are specified in the original export request.

+ #[serde(rename = "InclusiveStartTime")] + pub inclusive_start_time: f64, + ///

The name of the ledger.

+ #[serde(rename = "LedgerName")] + pub ledger_name: String, + ///

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

+ #[serde(rename = "RoleArn")] + pub role_arn: String, + #[serde(rename = "S3ExportConfiguration")] + pub s3_export_configuration: S3ExportConfiguration, + ///

The current state of the journal export job.

+ #[serde(rename = "Status")] + pub status: String, +} + +///

Information about a ledger, including its name, state, and when it was created.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct LedgerSummary { + ///

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

+ #[serde(rename = "CreationDateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_date_time: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The current status of the ledger.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListJournalS3ExportsForLedgerRequest { + ///

The maximum number of results to return in a single ListJournalS3ExportsForLedger request. (The actual number of results returned might be fewer.)

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, + ///

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3ExportsForLedger call, then you should use that value as input here.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListJournalS3ExportsForLedgerResponse { + ///

The array of journal export job descriptions that are associated with the specified ledger.

+ #[serde(rename = "JournalS3Exports")] + #[serde(skip_serializing_if = "Option::is_none")] + pub journal_s3_exports: Option>, + ///
  • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

  • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalS3ExportsForLedger call.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListJournalS3ExportsRequest { + ///

The maximum number of results to return in a single ListJournalS3Exports request. (The actual number of results returned might be fewer.)

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3Exports call, then you should use that value as input here.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListJournalS3ExportsResponse { + ///

The array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

+ #[serde(rename = "JournalS3Exports")] + #[serde(skip_serializing_if = "Option::is_none")] + pub journal_s3_exports: Option>, + ///
  • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

  • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalS3Exports call.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListLedgersRequest { + ///

The maximum number of results to return in a single ListLedgers request. (The actual number of results returned might be fewer.)

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListLedgers call, then you should use that value as input here.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListLedgersResponse { + ///

The array of ledger summaries that are associated with the current AWS account and Region.

+ #[serde(rename = "Ledgers")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ledgers: Option>, + ///

A pagination token, indicating whether there are more results available:

  • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

  • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListLedgers call.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceRequest { + ///

The Amazon Resource Name (ARN) for which you want to list the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceResponse { + ///

The tags that are currently associated with the specified Amazon QLDB resource.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option<::std::collections::HashMap>, +} + +///

The encryption settings that are used by a journal export job to write data in an Amazon Simple Storage Service (Amazon S3) bucket.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct S3EncryptionConfiguration { + ///

The Amazon Resource Name (ARN) for a customer master key (CMK) in AWS Key Management Service (AWS KMS).

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

+ #[serde(rename = "KmsKeyArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub kms_key_arn: Option, + ///

The Amazon S3 object encryption type.

To learn more about server-side encryption options in Amazon S3, see Protecting Data Using Server-Side Encryption in the Amazon S3 Developer Guide.

+ #[serde(rename = "ObjectEncryptionType")] + pub object_encryption_type: String, +} + +///

The Amazon Simple Storage Service (Amazon S3) bucket location in which a journal export job writes the journal contents.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct S3ExportConfiguration { + ///

The Amazon S3 bucket name in which a journal export job writes the journal contents.

The bucket name must comply with the Amazon S3 bucket naming conventions. For more information, see Bucket Restrictions and Limitations in the Amazon S3 Developer Guide.

+ #[serde(rename = "Bucket")] + pub bucket: String, + ///

The encryption settings that are used by a journal export job to write data in an Amazon S3 bucket.

+ #[serde(rename = "EncryptionConfiguration")] + pub encryption_configuration: S3EncryptionConfiguration, + ///

The prefix for the Amazon S3 bucket in which a journal export job writes the journal contents.

The prefix must comply with Amazon S3 key naming rules and restrictions. For more information, see Object Key and Metadata in the Amazon S3 Developer Guide.

The following are examples of valid Prefix values:

  • JournalExports-ForMyLedger/Testing/

  • JournalExports

  • My:Tests/

+ #[serde(rename = "Prefix")] + pub prefix: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + ///

The Amazon Resource Name (ARN) to which you want to add the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///

The key-value pairs to add as tags to the specified QLDB resource. Tag keys are case sensitive. If you specify a key that already exists for the resource, your request fails and returns an error. Tag values are case sensitive and can be null.

+ #[serde(rename = "Tags")] + pub tags: ::std::collections::HashMap, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + ///

The Amazon Resource Name (ARN) from which you want to remove the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

+ #[serde(rename = "ResourceArn")] + pub resource_arn: String, + ///

The list of tag keys that you want to remove.

+ #[serde(rename = "TagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceResponse {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateLedgerRequest { + ///

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+ #[serde(rename = "DeletionProtection")] + #[serde(skip_serializing_if = "Option::is_none")] + pub deletion_protection: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateLedgerResponse { + ///

The Amazon Resource Name (ARN) for the ledger.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

+ #[serde(rename = "CreationDateTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub creation_date_time: Option, + ///

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+ #[serde(rename = "DeletionProtection")] + #[serde(skip_serializing_if = "Option::is_none")] + pub deletion_protection: Option, + ///

The name of the ledger.

+ #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + ///

The current status of the ledger.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +///

A structure that can contain an Amazon Ion value in multiple encoding formats.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValueHolder { + ///

An Amazon Ion plaintext value contained in a ValueHolder structure.

+ #[serde(rename = "IonText")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ion_text: Option, +} + +/// Errors returned by CreateLedger +#[derive(Debug, PartialEq)] +pub enum CreateLedgerError { + ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

You have reached the limit on the maximum number of resources allowed.

+ LimitExceeded(String), + ///

The specified resource already exists.

+ ResourceAlreadyExists(String), + ///

The specified resource can't be modified at this time.

+    ResourceInUse(String),
+}
+
+impl CreateLedgerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateLedgerError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(CreateLedgerError::InvalidParameter(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateLedgerError::LimitExceeded(err.msg)),
+                "ResourceAlreadyExistsException" => return RusotoError::Service(CreateLedgerError::ResourceAlreadyExists(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(CreateLedgerError::ResourceInUse(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateLedgerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateLedgerError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateLedgerError::InvalidParameter(ref cause) => cause,
+            CreateLedgerError::LimitExceeded(ref cause) => cause,
+            CreateLedgerError::ResourceAlreadyExists(ref cause) => cause,
+            CreateLedgerError::ResourceInUse(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteLedger
+#[derive(Debug, PartialEq)]
+pub enum DeleteLedgerError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource can't be modified at this time.

+ ResourceInUse(String), + ///

The specified resource doesn't exist.

+ ResourceNotFound(String), + ///

The operation failed because a condition wasn't satisfied in advance.

+    ResourcePreconditionNotMet(String),
+}
+
+impl DeleteLedgerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteLedgerError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(DeleteLedgerError::InvalidParameter(err.msg)),
+                "ResourceInUseException" => return RusotoError::Service(DeleteLedgerError::ResourceInUse(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeleteLedgerError::ResourceNotFound(err.msg)),
+                "ResourcePreconditionNotMetException" => return RusotoError::Service(DeleteLedgerError::ResourcePreconditionNotMet(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteLedgerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteLedgerError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteLedgerError::InvalidParameter(ref cause) => cause,
+            DeleteLedgerError::ResourceInUse(ref cause) => cause,
+            DeleteLedgerError::ResourceNotFound(ref cause) => cause,
+            DeleteLedgerError::ResourcePreconditionNotMet(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeJournalS3Export
+#[derive(Debug, PartialEq)]
+pub enum DescribeJournalS3ExportError {
+    ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl DescribeJournalS3ExportError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeJournalS3ExportError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ResourceNotFoundException" => return RusotoError::Service(DescribeJournalS3ExportError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeJournalS3ExportError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeJournalS3ExportError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeJournalS3ExportError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeLedger
+#[derive(Debug, PartialEq)]
+pub enum DescribeLedgerError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl DescribeLedgerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeLedgerError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(DescribeLedgerError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DescribeLedgerError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeLedgerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeLedgerError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeLedgerError::InvalidParameter(ref cause) => cause,
+            DescribeLedgerError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ExportJournalToS3
+#[derive(Debug, PartialEq)]
+pub enum ExportJournalToS3Error {
+    ///

The specified resource doesn't exist.

+ ResourceNotFound(String), + ///

The operation failed because a condition wasn't satisfied in advance.

+    ResourcePreconditionNotMet(String),
+}
+
+impl ExportJournalToS3Error {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ExportJournalToS3Error> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ResourceNotFoundException" => return RusotoError::Service(ExportJournalToS3Error::ResourceNotFound(err.msg)),
+                "ResourcePreconditionNotMetException" => return RusotoError::Service(ExportJournalToS3Error::ResourcePreconditionNotMet(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ExportJournalToS3Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ExportJournalToS3Error {
+    fn description(&self) -> &str {
+        match *self {
+            ExportJournalToS3Error::ResourceNotFound(ref cause) => cause,
+            ExportJournalToS3Error::ResourcePreconditionNotMet(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetBlock
+#[derive(Debug, PartialEq)]
+pub enum GetBlockError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+ ResourceNotFound(String), + ///

The operation failed because a condition wasn't satisfied in advance.

+    ResourcePreconditionNotMet(String),
+}
+
+impl GetBlockError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetBlockError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(GetBlockError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetBlockError::ResourceNotFound(err.msg)),
+                "ResourcePreconditionNotMetException" => return RusotoError::Service(GetBlockError::ResourcePreconditionNotMet(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetBlockError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetBlockError {
+    fn description(&self) -> &str {
+        match *self {
+            GetBlockError::InvalidParameter(ref cause) => cause,
+            GetBlockError::ResourceNotFound(ref cause) => cause,
+            GetBlockError::ResourcePreconditionNotMet(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetDigest
+#[derive(Debug, PartialEq)]
+pub enum GetDigestError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+ ResourceNotFound(String), + ///

The operation failed because a condition wasn't satisfied in advance.

+    ResourcePreconditionNotMet(String),
+}
+
+impl GetDigestError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetDigestError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(GetDigestError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetDigestError::ResourceNotFound(err.msg)),
+                "ResourcePreconditionNotMetException" => return RusotoError::Service(GetDigestError::ResourcePreconditionNotMet(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetDigestError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetDigestError {
+    fn description(&self) -> &str {
+        match *self {
+            GetDigestError::InvalidParameter(ref cause) => cause,
+            GetDigestError::ResourceNotFound(ref cause) => cause,
+            GetDigestError::ResourcePreconditionNotMet(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetRevision
+#[derive(Debug, PartialEq)]
+pub enum GetRevisionError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+ ResourceNotFound(String), + ///

The operation failed because a condition wasn't satisfied in advance.

+    ResourcePreconditionNotMet(String),
+}
+
+impl GetRevisionError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetRevisionError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(GetRevisionError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetRevisionError::ResourceNotFound(err.msg)),
+                "ResourcePreconditionNotMetException" => return RusotoError::Service(GetRevisionError::ResourcePreconditionNotMet(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetRevisionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetRevisionError {
+    fn description(&self) -> &str {
+        match *self {
+            GetRevisionError::InvalidParameter(ref cause) => cause,
+            GetRevisionError::ResourceNotFound(ref cause) => cause,
+            GetRevisionError::ResourcePreconditionNotMet(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListJournalS3Exports
+#[derive(Debug, PartialEq)]
+pub enum ListJournalS3ExportsError {}
+
+impl ListJournalS3ExportsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListJournalS3ExportsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListJournalS3ExportsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListJournalS3ExportsError {
+    fn description(&self) -> &str {
+        match *self {}
+    }
+}
+/// Errors returned by ListJournalS3ExportsForLedger
+#[derive(Debug, PartialEq)]
+pub enum ListJournalS3ExportsForLedgerError {}
+
+impl ListJournalS3ExportsForLedgerError {
+    pub fn from_response(
+        res: BufferedHttpResponse,
+    ) -> RusotoError<ListJournalS3ExportsForLedgerError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListJournalS3ExportsForLedgerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListJournalS3ExportsForLedgerError {
+    fn description(&self) -> &str {
+        match *self {}
+    }
+}
+/// Errors returned by ListLedgers
+#[derive(Debug, PartialEq)]
+pub enum ListLedgersError {}
+
+impl ListLedgersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListLedgersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListLedgersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListLedgersError {
+    fn description(&self) -> &str {
+        match *self {}
+    }
+}
+/// Errors returned by ListTagsForResource
+#[derive(Debug, PartialEq)]
+pub enum ListTagsForResourceError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl ListTagsForResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(ListTagsForResourceError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(ListTagsForResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListTagsForResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListTagsForResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            ListTagsForResourceError::InvalidParameter(ref cause) => cause,
+            ListTagsForResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by TagResource
+#[derive(Debug, PartialEq)]
+pub enum TagResourceError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl TagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(TagResourceError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(TagResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for TagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for TagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            TagResourceError::InvalidParameter(ref cause) => cause,
+            TagResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UntagResource
+#[derive(Debug, PartialEq)]
+pub enum UntagResourceError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl UntagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(UntagResourceError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UntagResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UntagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UntagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            UntagResourceError::InvalidParameter(ref cause) => cause,
+            UntagResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateLedger
+#[derive(Debug, PartialEq)]
+pub enum UpdateLedgerError {
+    ///

One or more parameters in the request aren't valid.

+ InvalidParameter(String), + ///

The specified resource doesn't exist.

+    ResourceNotFound(String),
+}
+
+impl UpdateLedgerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateLedgerError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InvalidParameterException" => return RusotoError::Service(UpdateLedgerError::InvalidParameter(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UpdateLedgerError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateLedgerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateLedgerError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateLedgerError::InvalidParameter(ref cause) => cause,
+            UpdateLedgerError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Trait representing the capabilities of the QLDB API. QLDB clients implement this trait.
+pub trait Qldb {
+    ///

Creates a new ledger in your AWS account.

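A hedged usage sketch for this operation; the ledger name is a placeholder, and ALLOW_ALL is assumed to be the accepted PermissionsMode value at this API version:

```rust
use rusoto_core::Region;
use rusoto_qldb::{CreateLedgerRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    // "example-ledger" is a hypothetical name.
    let request = CreateLedgerRequest {
        name: "example-ledger".to_string(),
        permissions_mode: "ALLOW_ALL".to_string(),
        // Leave tags and deletion protection at their defaults.
        ..Default::default()
    };
    match client.create_ledger(request).sync() {
        Ok(resp) => println!("ledger state: {:?}", resp.state),
        Err(e) => eprintln!("CreateLedger failed: {}", e),
    }
}
```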
+    fn create_ledger(
+        &self,
+        input: CreateLedgerRequest,
+    ) -> RusotoFuture<CreateLedgerResponse, CreateLedgerError>;
+
+    ///

Deletes a ledger and all of its contents. This action is irreversible.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

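A sketch of that documented flow: flip DeletionProtection off with UpdateLedger, then delete. The ledger name is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_qldb::{DeleteLedgerRequest, Qldb, QldbClient, UpdateLedgerRequest};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    // Disable deletion protection first; deleting a protected ledger fails.
    client
        .update_ledger(UpdateLedgerRequest {
            name: "example-ledger".to_string(),
            deletion_protection: Some(false),
        })
        .sync()
        .expect("UpdateLedger failed");
    // Now the delete can proceed. This is irreversible.
    client
        .delete_ledger(DeleteLedgerRequest {
            name: "example-ledger".to_string(),
        })
        .sync()
        .expect("DeleteLedger failed");
}
```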
+ fn delete_ledger(&self, input: DeleteLedgerRequest) -> RusotoFuture<(), DeleteLedgerError>; + + ///

Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

+    fn describe_journal_s3_export(
+        &self,
+        input: DescribeJournalS3ExportRequest,
+    ) -> RusotoFuture<DescribeJournalS3ExportResponse, DescribeJournalS3ExportError>;
+
+    ///

Returns information about a ledger, including its state and when it was created.

+    fn describe_ledger(
+        &self,
+        input: DescribeLedgerRequest,
+    ) -> RusotoFuture<DescribeLedgerResponse, DescribeLedgerError>;
+
+    ///

Exports journal contents within a date and time range from a ledger into a specified Amazon Simple Storage Service (Amazon S3) bucket. The data is written as files in Amazon Ion format.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name is in CREATING status, then throws ResourcePreconditionNotMetException.

You can initiate up to two concurrent journal export requests for each ledger. Beyond this limit, journal export requests throw LimitExceededException.

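A hedged sketch of an export request; the bucket, prefix, role ARN, and epoch-second timestamps are placeholders, and SSE_S3 is chosen so that no KMS key is required:

```rust
use rusoto_core::Region;
use rusoto_qldb::{
    ExportJournalToS3Request, Qldb, QldbClient, S3EncryptionConfiguration, S3ExportConfiguration,
};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let request = ExportJournalToS3Request {
        name: "example-ledger".to_string(),
        inclusive_start_time: 1_560_000_000.0, // epoch seconds (placeholder)
        exclusive_end_time: 1_560_086_400.0,   // one day later (placeholder)
        role_arn: "arn:aws:iam::123456789012:role/example-qldb-export".to_string(),
        s3_export_configuration: S3ExportConfiguration {
            bucket: "example-bucket".to_string(),
            prefix: "JournalExports-ForMyLedger/".to_string(),
            encryption_configuration: S3EncryptionConfiguration {
                object_encryption_type: "SSE_S3".to_string(),
                kms_key_arn: None, // only needed for SSE_KMS
            },
        },
    };
    match client.export_journal_to_s3(request).sync() {
        // The returned ExportId can be fed to DescribeJournalS3Export to poll status.
        Ok(resp) => println!("export id: {}", resp.export_id),
        Err(e) => eprintln!("ExportJournalToS3 failed: {}", e),
    }
}
```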
+    fn export_journal_to_s3(
+        &self,
+        input: ExportJournalToS3Request,
+    ) -> RusotoFuture<ExportJournalToS3Response, ExportJournalToS3Error>;
+
+    ///

Returns a journal block object at a specified address in a ledger. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

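A sketch of fetching a block; the strand ID and sequence number are placeholders taken from the documentation's example address:

```rust
use rusoto_core::Region;
use rusoto_qldb::{GetBlockRequest, Qldb, QldbClient, ValueHolder};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    // A real address comes from your own ledger's journal.
    let request = GetBlockRequest {
        name: "example-ledger".to_string(),
        block_address: ValueHolder {
            ion_text: Some("{strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}".to_string()),
        },
        // Supply a digest tip address here to also receive a proof.
        digest_tip_address: None,
    };
    match client.get_block(request).sync() {
        Ok(resp) => println!("block: {:?}", resp.block.ion_text),
        Err(e) => eprintln!("GetBlock failed: {}", e),
    }
}
```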
+    fn get_block(&self, input: GetBlockRequest) -> RusotoFuture<GetBlockResponse, GetBlockError>;
+
+    ///

Returns the digest of a ledger at the latest committed block in the journal. The response includes a 256-bit hash value and a block address.

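A minimal sketch of requesting a digest (the ledger name is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_qldb::{GetDigestRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let resp = client
        .get_digest(GetDigestRequest {
            name: "example-ledger".to_string(),
        })
        .sync()
        .expect("GetDigest failed");
    // The digest is a 256-bit hash; the tip address pins the block it covers.
    println!("digest ({} bytes): {:?}", resp.digest.len(), resp.digest);
    println!("tip address: {:?}", resp.digest_tip_address.ion_text);
}
```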
+    fn get_digest(
+        &self,
+        input: GetDigestRequest,
+    ) -> RusotoFuture<GetDigestResponse, GetDigestError>;
+
+    ///

Returns a revision data object for a specified document ID and block address. Also returns a proof of the specified revision for verification if DigestTipAddress is provided.

+    fn get_revision(
+        &self,
+        input: GetRevisionRequest,
+    ) -> RusotoFuture<GetRevisionResponse, GetRevisionError>;
+
+    ///

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

+    fn list_journal_s3_exports(
+        &self,
+        input: ListJournalS3ExportsRequest,
+    ) -> RusotoFuture<ListJournalS3ExportsResponse, ListJournalS3ExportsError>;
+
+    ///

Returns an array of journal export job descriptions for a specified ledger.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

+    fn list_journal_s3_exports_for_ledger(
+        &self,
+        input: ListJournalS3ExportsForLedgerRequest,
+    ) -> RusotoFuture<ListJournalS3ExportsForLedgerResponse, ListJournalS3ExportsForLedgerError>;
+
+    ///

Returns an array of ledger summaries that are associated with the current AWS account and Region.

This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.

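A hedged pagination sketch that follows NextToken until the service stops returning one:

```rust
use rusoto_core::Region;
use rusoto_qldb::{ListLedgersRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let page = client
            .list_ledgers(ListLedgersRequest {
                max_results: Some(100),
                next_token: next_token.clone(),
            })
            .sync()
            .expect("ListLedgers failed");
        for ledger in page.ledgers.unwrap_or_default() {
            println!("{:?} ({:?})", ledger.name, ledger.state);
        }
        // An absent NextToken means the last page has been processed.
        match page.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
}
```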
+    fn list_ledgers(
+        &self,
+        input: ListLedgersRequest,
+    ) -> RusotoFuture<ListLedgersResponse, ListLedgersError>;
+
+    ///

Returns all tags for a specified Amazon QLDB resource.

+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError>;
+
+    ///

Adds one or more tags to a specified Amazon QLDB resource.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, your request fails and returns an error.

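A usage sketch; the ARN matches the documentation's example form and the tag is arbitrary:

```rust
use rusoto_core::Region;
use rusoto_qldb::{Qldb, QldbClient, TagResourceRequest};
use std::collections::HashMap;

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("env".to_string(), "test".to_string());
    // The ledger ARN below is a placeholder.
    let request = TagResourceRequest {
        resource_arn: "arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger".to_string(),
        tags,
    };
    client
        .tag_resource(request)
        .sync()
        .expect("TagResource failed");
}
```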
+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError>;
+
+    ///

Removes one or more tags from a specified Amazon QLDB resource. You can specify up to 50 tag keys to remove.

+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError>;
+
+    ///

Updates properties on a ledger.

+    fn update_ledger(
+        &self,
+        input: UpdateLedgerRequest,
+    ) -> RusotoFuture<UpdateLedgerResponse, UpdateLedgerError>;
+}
+/// A client for the QLDB API.
+#[derive(Clone)]
+pub struct QldbClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl QldbClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> QldbClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> QldbClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
+            region,
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> QldbClient {
+        QldbClient { client, region }
+    }
+}
+
+impl Qldb for QldbClient {
+    ///

Creates a new ledger in your AWS account.

+    fn create_ledger(
+        &self,
+        input: CreateLedgerRequest,
+    ) -> RusotoFuture<CreateLedgerResponse, CreateLedgerError> {
+        let request_uri = "/ledgers";
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateLedgerResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateLedgerError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
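For orientation, here is a minimal sketch of driving `create_ledger` from application code. The request fields (`name`, `permissions_mode`) and the blocking `.sync()` call follow the rusoto 0.41 API shapes; treat the exact field set as illustrative rather than authoritative.

```rust
use rusoto_core::Region;
use rusoto_qldb::{CreateLedgerRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    // `name` and `permissions_mode` mirror the QLDB CreateLedger API;
    // the remaining optional fields fall back to their defaults.
    let request = CreateLedgerRequest {
        name: "vehicle-registration".to_string(),
        permissions_mode: "ALLOW_ALL".to_string(),
        ..Default::default()
    };
    match client.create_ledger(request).sync() {
        Ok(response) => println!("created ledger, state: {:?}", response.state),
        Err(error) => eprintln!("CreateLedger failed: {}", error),
    }
}
```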

Deletes a ledger and all of its contents. This action is irreversible.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

+    fn delete_ledger(&self, input: DeleteLedgerRequest) -> RusotoFuture<(), DeleteLedgerError> {
+        let request_uri = format!("/ledgers/{name}", name = input.name);
+
+        let mut request = SignedRequest::new("DELETE", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = ::std::mem::drop(response);
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteLedgerError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
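The deletion-protection requirement described above can be scripted directly: first flip the flag with `update_ledger`, then delete. A hedged sketch, with the `UpdateLedgerRequest`/`DeleteLedgerRequest` field sets assumed from the API shapes:

```rust
use rusoto_core::Region;
use rusoto_qldb::{DeleteLedgerRequest, Qldb, QldbClient, UpdateLedgerRequest};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    // Disable deletion protection first, as the API requires.
    let update = UpdateLedgerRequest {
        name: "vehicle-registration".to_string(),
        deletion_protection: Some(false),
    };
    client.update_ledger(update).sync().expect("UpdateLedger failed");
    // Now the ledger can be deleted; this is irreversible.
    let delete = DeleteLedgerRequest {
        name: "vehicle-registration".to_string(),
    };
    client.delete_ledger(delete).sync().expect("DeleteLedger failed");
}
```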

Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

+    fn describe_journal_s3_export(
+        &self,
+        input: DescribeJournalS3ExportRequest,
+    ) -> RusotoFuture<DescribeJournalS3ExportResponse, DescribeJournalS3ExportError> {
+        let request_uri = format!(
+            "/ledgers/{name}/journal-s3-exports/{export_id}",
+            export_id = input.export_id,
+            name = input.name
+        );
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeJournalS3ExportResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DescribeJournalS3ExportError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Returns information about a ledger, including its state and when it was created.

+    fn describe_ledger(
+        &self,
+        input: DescribeLedgerRequest,
+    ) -> RusotoFuture<DescribeLedgerResponse, DescribeLedgerError> {
+        let request_uri = format!("/ledgers/{name}", name = input.name);
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeLedgerResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeLedgerError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Exports journal contents within a date and time range from a ledger into a specified Amazon Simple Storage Service (Amazon S3) bucket. The data is written as files in Amazon Ion format.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name is in CREATING status, then throws ResourcePreconditionNotMetException.

You can initiate up to two concurrent journal export requests for each ledger. Beyond this limit, journal export requests throw LimitExceededException.

+    fn export_journal_to_s3(
+        &self,
+        input: ExportJournalToS3Request,
+    ) -> RusotoFuture<ExportJournalToS3Response, ExportJournalToS3Error> {
+        let request_uri = format!("/ledgers/{name}/journal-s3-exports", name = input.name);
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ExportJournalToS3Response, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ExportJournalToS3Error::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
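A sketch of kicking off an export, under stated assumptions: the nested `S3ExportConfiguration`/`S3EncryptionConfiguration` field names are inferred from the QLDB API, the timestamps are epoch seconds, and the ARNs and bucket names are placeholders.

```rust
use rusoto_core::Region;
use rusoto_qldb::{
    ExportJournalToS3Request, Qldb, QldbClient, S3EncryptionConfiguration, S3ExportConfiguration,
};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let request = ExportJournalToS3Request {
        name: "vehicle-registration".to_string(),
        // Role that lets QLDB write objects into the bucket (placeholder ARN).
        role_arn: "arn:aws:iam::123456789012:role/qldb-export".to_string(),
        inclusive_start_time: 1_546_300_800.0,
        exclusive_end_time: 1_546_387_200.0,
        s3_export_configuration: S3ExportConfiguration {
            bucket: "my-journal-exports".to_string(),
            prefix: "vehicle-registration/".to_string(),
            encryption_configuration: S3EncryptionConfiguration {
                object_encryption_type: "NO_ENCRYPTION".to_string(),
                kms_key_arn: None,
            },
        },
    };
    match client.export_journal_to_s3(request).sync() {
        Ok(response) => println!("export started: {:?}", response.export_id),
        Err(error) => eprintln!("ExportJournalToS3 failed: {}", error),
    }
}
```

Note the limit stated above: only two concurrent export jobs per ledger before `LimitExceededException` is thrown.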

Returns a journal block object at a specified address in a ledger. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

+    fn get_block(&self, input: GetBlockRequest) -> RusotoFuture<GetBlockResponse, GetBlockError> {
+        let request_uri = format!("/ledgers/{name}/block", name = input.name);
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetBlockResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetBlockError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Returns the digest of a ledger at the latest committed block in the journal. The response includes a 256-bit hash value and a block address.

+    fn get_digest(
+        &self,
+        input: GetDigestRequest,
+    ) -> RusotoFuture<GetDigestResponse, GetDigestError> {
+        let request_uri = format!("/ledgers/{name}/digest", name = input.name);
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetDigestResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetDigestError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
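A short sketch of requesting a digest; the response fields (`digest`, `digest_tip_address`) are assumed from the API description above.

```rust
use rusoto_core::Region;
use rusoto_qldb::{GetDigestRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let request = GetDigestRequest {
        name: "vehicle-registration".to_string(),
    };
    match client.get_digest(request).sync() {
        // The digest is the 256-bit hash; the tip address locates the
        // latest committed block it covers.
        Ok(response) => println!(
            "digest: {:?}, tip address: {:?}",
            response.digest, response.digest_tip_address
        ),
        Err(error) => eprintln!("GetDigest failed: {}", error),
    }
}
```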

Returns a revision data object for a specified document ID and block address. Also returns a proof of the specified revision for verification if DigestTipAddress is provided.

+    fn get_revision(
+        &self,
+        input: GetRevisionRequest,
+    ) -> RusotoFuture<GetRevisionResponse, GetRevisionError> {
+        let request_uri = format!("/ledgers/{name}/revision", name = input.name);
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetRevisionResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetRevisionError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

+    fn list_journal_s3_exports(
+        &self,
+        input: ListJournalS3ExportsRequest,
+    ) -> RusotoFuture<ListJournalS3ExportsResponse, ListJournalS3ExportsError> {
+        let request_uri = "/journal-s3-exports";
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("max_results", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("next_token", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListJournalS3ExportsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListJournalS3ExportsError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    ///

Returns an array of journal export job descriptions for a specified ledger.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

+    fn list_journal_s3_exports_for_ledger(
+        &self,
+        input: ListJournalS3ExportsForLedgerRequest,
+    ) -> RusotoFuture<ListJournalS3ExportsForLedgerResponse, ListJournalS3ExportsForLedgerError>
+    {
+        let request_uri = format!("/ledgers/{name}/journal-s3-exports", name = input.name);
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("max_results", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("next_token", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListJournalS3ExportsForLedgerResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(ListJournalS3ExportsForLedgerError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Returns an array of ledger summaries that are associated with the current AWS account and Region.

This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.

+    fn list_ledgers(
+        &self,
+        input: ListLedgersRequest,
+    ) -> RusotoFuture<ListLedgersResponse, ListLedgersError> {
+        let request_uri = "/ledgers";
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("max_results", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("next_token", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListLedgersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListLedgersError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
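Because of the 100-item page size noted above, callers typically loop on `next_token` until it comes back empty; a minimal sketch:

```rust
use rusoto_core::Region;
use rusoto_qldb::{ListLedgersRequest, Qldb, QldbClient};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = ListLedgersRequest {
            max_results: Some(100),
            next_token: next_token.clone(),
        };
        let response = client
            .list_ledgers(request)
            .sync()
            .expect("ListLedgers failed");
        for ledger in response.ledgers.unwrap_or_default() {
            println!("ledger: {:?}", ledger.name);
        }
        // A missing token means the last page has been reached.
        next_token = response.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```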

Returns all tags for a specified Amazon QLDB resource.

+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("GET", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListTagsForResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListTagsForResourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    ///

Adds one or more tags to a specified Amazon QLDB resource.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, your request fails and returns an error.

+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("POST", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Removes one or more tags from a specified Amazon QLDB resource. You can specify up to 50 tag keys to remove.

+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("DELETE", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        for item in input.tag_keys.iter() {
+            params.put("tagKeys", item);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UntagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///
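From the caller's side, the `tagKeys` query parameters above are just a `Vec` of keys; a sketch with an illustrative ARN:

```rust
use rusoto_core::Region;
use rusoto_qldb::{Qldb, QldbClient, UntagResourceRequest};

fn main() {
    let client = QldbClient::new(Region::UsEast1);
    let request = UntagResourceRequest {
        resource_arn: "arn:aws:qldb:us-east-1:123456789012:ledger/vehicle-registration"
            .to_string(),
        // Up to 50 keys may be removed per call.
        tag_keys: vec!["env".to_string(), "team".to_string()],
    };
    client
        .untag_resource(request)
        .sync()
        .expect("UntagResource failed");
}
```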

Updates properties on a ledger.

+    fn update_ledger(
+        &self,
+        input: UpdateLedgerRequest,
+    ) -> RusotoFuture<UpdateLedgerResponse, UpdateLedgerError> {
+        let request_uri = format!("/ledgers/{name}", name = input.name);
+
+        let mut request = SignedRequest::new("PATCH", "qldb", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateLedgerResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateLedgerError::from_response(response))),
+                )
+            }
+        })
+    }
+}
diff --git a/rusoto/services/qldb/src/lib.rs b/rusoto/services/qldb/src/lib.rs
new file mode 100644
index 00000000000..a693ac97a7d
--- /dev/null
+++ b/rusoto/services/qldb/src/lib.rs
@@ -0,0 +1,32 @@
+
+// =================================================================
+//
+//                            * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//!

The control plane for Amazon QLDB

+//!
+//! If you're using the service, you're probably looking for [QldbClient](struct.QldbClient.html) and [Qldb](trait.Qldb.html).
+
+extern crate bytes;
+extern crate futures;
+extern crate rusoto_core;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+
+mod generated;
+mod custom;
+
+pub use crate::generated::*;
+pub use crate::custom::*;
+
diff --git a/rusoto/services/ram/Cargo.toml b/rusoto/services/ram/Cargo.toml
index 4e5eb383cbe..c500ab9a112 100644
--- a/rusoto/services/ram/Cargo.toml
+++ b/rusoto/services/ram/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_ram"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/ram/README.md b/rusoto/services/ram/README.md
index 1b09727af1c..e0fb7464976 100644
--- a/rusoto/services/ram/README.md
+++ b/rusoto/services/ram/README.md
@@ -23,9 +23,16 @@ To use `rusoto_ram` in your application, add it as a dependency in your `Cargo.toml`:
 
 ```toml
 [dependencies]
-rusoto_ram = "0.40.0"
+rusoto_ram = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/ram/src/custom/mod.rs b/rusoto/services/ram/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/ram/src/custom/mod.rs
+++ b/rusoto/services/ram/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/ram/src/generated.rs b/rusoto/services/ram/src/generated.rs
index 9c51a3a1b8f..b6bea015f7f 100644
--- a/rusoto/services/ram/src/generated.rs
+++ b/rusoto/services/ram/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -37,7 +36,7 @@ pub struct AcceptResourceShareInvitationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AcceptResourceShareInvitationResponse {
     ///
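The `serialize_structs` feature advertised in the README above makes response structs `Serialize`, which is handy for dumping them as JSON; a sketch assuming `serde_json` is available as a dependency:

```rust
// Build with: rusoto_ram = { version = "0.41.0", features = ["serialize_structs"] }
use rusoto_ram::GetResourceSharesResponse;

fn main() {
    let response = GetResourceSharesResponse::default();
    // With the feature enabled, the struct derives Serialize,
    // so it can be written out through serde_json.
    let json = serde_json::to_string_pretty(&response).expect("serialization failed");
    println!("{}", json);
}
```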

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -69,7 +68,7 @@ pub struct AssociateResourceShareRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateResourceShareResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -109,7 +108,7 @@ pub struct CreateResourceShareRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResourceShareResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -133,7 +132,7 @@ pub struct DeleteResourceShareRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResourceShareResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -165,7 +164,7 @@ pub struct DisassociateResourceShareRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateResourceShareResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -181,7 +180,7 @@ pub struct DisassociateResourceShareResponse { pub struct EnableSharingWithAwsOrganizationRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableSharingWithAwsOrganizationResponse { ///

Indicates whether the request succeeded.

#[serde(rename = "returnValue")] @@ -209,7 +208,7 @@ pub struct GetResourcePoliciesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourcePoliciesResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -253,7 +252,7 @@ pub struct GetResourceShareAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourceShareAssociationsResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -286,7 +285,7 @@ pub struct GetResourceShareInvitationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourceShareInvitationsResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -330,7 +329,7 @@ pub struct GetResourceSharesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourceSharesResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -374,7 +373,7 @@ pub struct ListPrincipalsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPrincipalsResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -418,7 +417,7 @@ pub struct ListResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourcesResponse { ///

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

#[serde(rename = "nextToken")] @@ -432,7 +431,7 @@ pub struct ListResourcesResponse { ///

Describes a principal for use with AWS Resource Access Manager.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Principal { ///

The time when the principal was associated with the resource share.

#[serde(rename = "creationTime")] @@ -468,7 +467,7 @@ pub struct RejectResourceShareInvitationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectResourceShareInvitationResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -482,7 +481,7 @@ pub struct RejectResourceShareInvitationResponse { ///

Describes a resource associated with a resource share.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///

The Amazon Resource Name (ARN) of the resource.

#[serde(rename = "arn")] @@ -516,7 +515,7 @@ pub struct Resource { ///

Describes a resource share.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceShare { ///

Indicates whether principals outside your organization can be associated with a resource share.

#[serde(rename = "allowExternalPrincipals")] @@ -558,7 +557,7 @@ pub struct ResourceShare { ///

Describes an association with a resource share.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceShareAssociation { ///

The associated entity. For resource associations, this is the ARN of the resource. For principal associations, this is the ID of an AWS account or the ARN of an OU or organization from AWS Organizations.

#[serde(rename = "associatedEntity")] @@ -596,7 +595,7 @@ pub struct ResourceShareAssociation { ///

Describes an invitation to join a resource share.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceShareInvitation { ///

The date and time when the invitation was sent.

#[serde(rename = "invitationTimestamp")] @@ -669,7 +668,7 @@ pub struct TagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -683,7 +682,7 @@ pub struct UntagResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -706,7 +705,7 @@ pub struct UpdateResourceShareRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResourceShareResponse { ///

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

#[serde(rename = "clientToken")] @@ -2131,10 +2130,7 @@ impl RamClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> RamClient { - RamClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2148,10 +2144,14 @@ impl RamClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - RamClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> RamClient { + RamClient { client, region } } } diff --git a/rusoto/services/rds-data/Cargo.toml b/rusoto/services/rds-data/Cargo.toml index ee8a8b70029..53ebc19105d 100644 --- a/rusoto/services/rds-data/Cargo.toml +++ b/rusoto/services/rds-data/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_rds_data" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/rds-data/README.md b/rusoto/services/rds-data/README.md index 07da49a3efe..e7a1370af37 100644 --- a/rusoto/services/rds-data/README.md +++ b/rusoto/services/rds-data/README.md @@ -23,9 +23,16 @@ To use `rusoto_rds_data` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_rds_data = "0.40.0" +rusoto_rds_data = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/rds-data/src/custom/mod.rs b/rusoto/services/rds-data/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/rds-data/src/custom/mod.rs +++ b/rusoto/services/rds-data/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/rds-data/src/generated.rs b/rusoto/services/rds-data/src/generated.rs index 23d99e8efde..96849b76fb9 100644 --- a/rusoto/services/rds-data/src/generated.rs +++ b/rusoto/services/rds-data/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -64,7 +63,7 @@ pub struct BatchExecuteStatementRequest { ///

The response elements represent the output of a SQL statement over an array of data.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchExecuteStatementResponse { ///

The execution results of each batch entry.

#[serde(rename = "updateResults")] @@ -95,7 +94,7 @@ pub struct BeginTransactionRequest { ///

The response elements represent the output of a request to start a SQL transaction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BeginTransactionResponse { ///

The transaction ID of the transaction started by the call.

#[serde(rename = "transactionId")] @@ -105,7 +104,7 @@ pub struct BeginTransactionResponse { ///

Contains the metadata for a column.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ColumnMetadata { ///

The type of the column.

#[serde(rename = "arrayBaseColumnType")] @@ -181,7 +180,7 @@ pub struct CommitTransactionRequest { ///

The response elements represent the output of a commit transaction request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommitTransactionResponse { ///

The status of the commit operation.

#[serde(rename = "transactionStatus")] @@ -220,7 +219,7 @@ pub struct ExecuteSqlRequest { ///

The response elements represent the output of a request to run one or more SQL statements.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecuteSqlResponse { ///

The results of the SQL statement or statements.

#[serde(rename = "sqlStatementResults")] @@ -284,7 +283,7 @@ pub struct ExecuteStatementRequest { ///

The response elements represent the output of a request to run a SQL statement against a database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecuteStatementResponse { ///

Metadata for the columns included in the results.

#[serde(rename = "columnMetadata")] @@ -340,7 +339,7 @@ pub struct Field { ///

A record returned by a call.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Record { ///

The values returned in the record.

#[serde(rename = "values")] @@ -350,7 +349,7 @@ pub struct Record { ///

The result set returned by a SQL statement.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultFrame { ///

The records in the result set.

#[serde(rename = "records")] @@ -364,7 +363,7 @@ pub struct ResultFrame { ///

The metadata of the result set returned by a SQL statement.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResultSetMetadata { ///

The number of columns in the result set.

#[serde(rename = "columnCount")] @@ -394,7 +393,7 @@ pub struct RollbackTransactionRequest { ///

The response elements represent the output of a request to perform a rollback of a transaction.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RollbackTransactionResponse { ///

The status of the rollback operation.

#[serde(rename = "transactionStatus")] @@ -417,7 +416,7 @@ pub struct SqlParameter { ///

The result of a SQL statement.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SqlStatementResult { ///

The number of records updated by a SQL statement.

#[serde(rename = "numberOfRecordsUpdated")] @@ -431,7 +430,7 @@ pub struct SqlStatementResult { ///

A structure value returned by a call.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StructValue { ///

The attributes returned in the record.

#[serde(rename = "attributes")] @@ -441,7 +440,7 @@ pub struct StructValue { ///

The response elements represent the results of an update.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResult { ///

Values for fields generated during the request.

#[serde(rename = "generatedFields")] @@ -451,7 +450,7 @@ pub struct UpdateResult { ///

Contains the value of a column.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Value { ///

An array of column values.

#[serde(rename = "arrayValues")] @@ -951,10 +950,7 @@ impl RdsDataClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> RdsDataClient { - RdsDataClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -968,10 +964,14 @@ impl RdsDataClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - RdsDataClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> RdsDataClient { + RdsDataClient { client, region } } } diff --git a/rusoto/services/rds/Cargo.toml b/rusoto/services/rds/Cargo.toml index f6dd21b2674..fe697d6d807 100644 --- a/rusoto/services/rds/Cargo.toml +++ b/rusoto/services/rds/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_rds" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/rds/README.md b/rusoto/services/rds/README.md index bf4056b47ac..5b759ad3b97 100644 --- a/rusoto/services/rds/README.md +++ b/rusoto/services/rds/README.md @@ -23,9 +23,16 @@ To use `rusoto_rds` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_rds = "0.40.0" +rusoto_rds = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/rds/src/custom/mod.rs b/rusoto/services/rds/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/rds/src/custom/mod.rs +++ b/rusoto/services/rds/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/rds/src/generated.rs b/rusoto/services/rds/src/generated.rs index 26138e17f0a..747903d5318 100644 --- a/rusoto/services/rds/src/generated.rs +++ b/rusoto/services/rds/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -1193,7 +1192,7 @@ pub struct CreateDBClusterEndpointMessage { pub db_cluster_endpoint_identifier: String, ///
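Rounding out the rds-data changes before the rds diff: a hedged sketch of exercising the Data API structs defined above, with the cluster and secret ARNs as placeholders:

```rust
use rusoto_core::Region;
use rusoto_rds_data::{ExecuteStatementRequest, RdsData, RdsDataClient};

fn main() {
    let client = RdsDataClient::new(Region::UsEast1);
    let request = ExecuteStatementRequest {
        resource_arn: "arn:aws:rds:us-east-1:123456789012:cluster:my-aurora".to_string(),
        secret_arn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-db".to_string(),
        sql: "SELECT 1".to_string(),
        ..Default::default()
    };
    match client.execute_statement(request).sync() {
        Ok(response) => println!("records: {:?}", response.records),
        Err(error) => eprintln!("ExecuteStatement failed: {}", error),
    }
}
```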

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

pub db_cluster_identifier: String, - ///

The type of the endpoint. One of: READER, ANY.

+ ///

The type of the endpoint. One of: READER, WRITER, ANY.

pub endpoint_type: String, ///

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

pub excluded_members: Option<Vec<String>>, @@ -1261,13 +1260,15 @@ pub struct CreateDBClusterMessage { pub deletion_protection: Option<bool>, ///

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.

When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.

For more information, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.

+ pub enable_http_endpoint: Option<bool>, + ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The name of the database engine to be used for this DB cluster.

Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible Aurora), and aurora-postgresql

pub engine: String, - ///

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, or global.

+ ///

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

pub engine_mode: Option<String>, - ///

The version number of the database engine to use.

Aurora MySQL

Example: 5.6.10a, 5.7.12

Aurora PostgreSQL

Example: 9.6.3

+ ///

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

pub engine_version: Option<String>, ///
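The CLI invocations above have a direct `rusoto_rds` equivalent; the `--query` projection is done client-side in this sketch:

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeDBEngineVersionsMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = DescribeDBEngineVersionsMessage {
        engine: Some("aurora-postgresql".to_string()),
        ..Default::default()
    };
    let response = client
        .describe_db_engine_versions(request)
        .sync()
        .expect("DescribeDBEngineVersions failed");
    // Equivalent of --query "DBEngineVersions[].EngineVersion".
    for version in response.db_engine_versions.unwrap_or_default() {
        println!("{:?}", version.engine_version);
    }
}
```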

The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

pub global_cluster_identifier: Option<String>,
@@ -1356,6 +1357,9 @@ impl CreateDBClusterMessageSerializer {
                 field_value,
             );
         }
+        if let Some(ref field_value) = obj.enable_http_endpoint {
+            params.put(&format!("{}{}", prefix, "EnableHttpEndpoint"), &field_value);
+        }
         if let Some(ref field_value) = obj.enable_iam_database_authentication {
             params.put(
                 &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"),
@@ -1612,7 +1616,7 @@ pub struct CreateDBInstanceMessage {
     pub db_instance_identifier: String,
     ///
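The `EnableHttpEndpoint` wiring above surfaces as a plain `Option<bool>` on `CreateDBClusterMessage`; a sketch of creating an Aurora Serverless cluster with the Data API switched on (identifiers and password purely illustrative):

```rust
use rusoto_core::Region;
use rusoto_rds::{CreateDBClusterMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = CreateDBClusterMessage {
        db_cluster_identifier: "my-serverless-cluster".to_string(),
        engine: "aurora".to_string(),
        engine_mode: Some("serverless".to_string()),
        master_username: Some("admin".to_string()),
        master_user_password: Some("change-me-please".to_string()),
        // The HTTP endpoint only applies to Aurora Serverless clusters.
        enable_http_endpoint: Some(true),
        ..Default::default()
    };
    match client.create_db_cluster(request).sync() {
        Ok(result) => println!("cluster status: {:?}", result.db_cluster.map(|c| c.status)),
        Err(error) => eprintln!("CreateDBCluster failed: {}", error),
    }
}
```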

The meaning of this parameter differs according to the database engine you use.

MySQL

The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Can't be a word reserved by the specified database engine

MariaDB

The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Can't be a word reserved by the specified database engine

PostgreSQL

The name of the database to create when the DB instance is created. If this parameter is not specified, the default "postgres" database is created in the DB instance.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.

  • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine

Oracle

The Oracle System ID (SID) of the created DB instance. If you specify null, the default value ORCL is used. You can't specify the string NULL, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters

SQL Server

Not applicable. Must be null.

Amazon Aurora

The name of the database to create when the primary instance of the DB cluster is created. If this parameter is not specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Can't be a word reserved by the specified database engine

pub db_name: Option<String>, - ///

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

+ ///

The name of the DB parameter group to associate with this DB instance. If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

pub db_parameter_group_name: Option<String>, ///

A list of DB security groups to associate with this DB instance.

Default: The default DB security group for the database engine.

pub db_security_groups: Option<Vec<String>>, @@ -1626,7 +1630,7 @@ pub domain_iam_role_name: Option<String>, ///

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

MySQL

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

MySQL

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • For MySQL 8.0, minor version 8.0.16 or higher

PostgreSQL

  • For PostgreSQL 9.5, minor version 9.5.15 or higher

  • For PostgreSQL 9.6, minor version 9.6.11 or higher

  • PostgreSQL 10.6, 10.7, and 10.9

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

pub enable_performance_insights: Option<bool>, @@ -1634,7 +1638,7 @@ pub engine: String, ///

The version number of the database engine to use.

For a list of valid engine versions, use the DescribeDBEngineVersions action.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

MariaDB

See MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

See Version and Feature Support on Amazon RDS in the Amazon RDS User Guide.

MySQL

See MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

See Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

See Supported PostgreSQL Database Versions in the Amazon RDS User Guide.

pub engine_version: Option<String>, - ///

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance.

+ ///

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance.

pub iops: Option<i64>, ///

The AWS KMS key identifier for an encrypted DB instance.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

Amazon Aurora

Not applicable. The KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

pub kms_key_id: Option<String>, @@ -1644,6 +1648,8 @@ pub struct CreateDBInstanceMessage { pub master_user_password: Option<String>, ///

The name for the master user.

Amazon Aurora

Not applicable. The name for the master user is managed by the DB cluster.

MariaDB

Constraints:

  • Required for MariaDB.

  • Must be 1 to 16 letters or numbers.

  • Can't be a reserved word for the chosen database engine.

Microsoft SQL Server

Constraints:

  • Required for SQL Server.

  • Must be 1 to 128 letters or numbers.

  • The first character must be a letter.

  • Can't be a reserved word for the chosen database engine.

MySQL

Constraints:

  • Required for MySQL.

  • Must be 1 to 16 letters or numbers.

  • First character must be a letter.

  • Can't be a reserved word for the chosen database engine.

Oracle

Constraints:

  • Required for Oracle.

  • Must be 1 to 30 letters or numbers.

  • First character must be a letter.

  • Can't be a reserved word for the chosen database engine.

PostgreSQL

Constraints:

  • Required for PostgreSQL.

  • Must be 1 to 63 letters or numbers.

  • First character must be a letter.

  • Can't be a reserved word for the chosen database engine.

pub master_username: Option<String>, + ///

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

+ pub max_allocated_storage: Option<i64>, ///

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

pub monitoring_interval: Option<i64>, ///

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

@@ -1797,6 +1803,12 @@ impl CreateDBInstanceMessageSerializer {
         if let Some(ref field_value) = obj.master_username {
             params.put(&format!("{}{}", prefix, "MasterUsername"), &field_value);
         }
+        if let Some(ref field_value) = obj.max_allocated_storage {
+            params.put(
+                &format!("{}{}", prefix, "MaxAllocatedStorage"),
+                &field_value,
+            );
+        }
         if let Some(ref field_value) = obj.monitoring_interval {
             params.put(&format!("{}{}", prefix, "MonitoringInterval"), &field_value);
         }
@@ -1892,13 +1904,15 @@ pub struct CreateDBInstanceReadReplicaMessage {
     pub db_instance_class: Option<String>,
     ///
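`MaxAllocatedStorage` is what turns on storage autoscaling for an instance, up to the given ceiling; a sketch pairing it with an initial allocation (all values illustrative):

```rust
use rusoto_core::Region;
use rusoto_rds::{CreateDBInstanceMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = CreateDBInstanceMessage {
        db_instance_identifier: "my-mysql-instance".to_string(),
        db_instance_class: "db.t3.medium".to_string(),
        engine: "mysql".to_string(),
        allocated_storage: Some(20),
        // RDS may grow storage automatically up to this ceiling (GiB).
        max_allocated_storage: Some(100),
        ..Default::default()
    };
    match client.create_db_instance(request).sync() {
        Ok(result) => println!(
            "instance status: {:?}",
            result.db_instance.map(|i| i.db_instance_status)
        ),
        Err(error) => eprintln!("CreateDBInstance failed: {}", error),
    }
}
```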

The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

pub db_instance_identifier: String, + ///

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same region Read Replica, or the default DBParameterGroup for the specified DB engine for a cross region Read Replica.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

+ pub db_parameter_group_name: Option<String>, ///

Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

Constraints:

  • Can only be specified if the source DB instance identifier specifies a DB instance in another AWS Region.

  • If supplied, must match the name of an existing DBSubnetGroup.

  • The specified DB subnet group must be in the same AWS Region in which the operation is running.

  • All Read Replicas in one AWS Region that are created from the same source DB instance must either:

    • Specify DB subnet groups from the same VPC. All these Read Replicas are created in the same VPC.

    • Not specify a DB subnet group. All these Read Replicas are created outside of any VPC.

Example: mySubnetgroup

pub db_subnet_group_name: Option<String>, ///

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.

pub deletion_protection: Option<bool>, ///

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • Aurora MySQL 5.6 or higher

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

A value that indicates whether to enable Performance Insights for the Read Replica.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

pub enable_performance_insights: Option, @@ -1965,6 +1979,12 @@ impl CreateDBInstanceReadReplicaMessageSerializer { &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); + if let Some(ref field_value) = obj.db_parameter_group_name { + params.put( + &format!("{}{}", prefix, "DBParameterGroupName"), + &field_value, + ); + } if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } @@ -2652,6 +2672,8 @@ pub struct DBCluster { pub cluster_create_time: Option, ///

Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster.

pub copy_tags_to_snapshot: Option<bool>, + ///

Specifies whether the DB cluster is a clone of a DB cluster owned by a different AWS account.

+ pub cross_account_clone: Option<bool>, ///

Identifies all custom endpoints associated with the cluster.

pub custom_endpoints: Option<Vec<String>>, ///

The Amazon Resource Name (ARN) for the DB cluster.

@@ -2682,7 +2704,7 @@ pub struct DBCluster { pub endpoint: Option<String>, ///

Provides the name of the database engine to be used for this DB cluster.

pub engine: Option<String>, - ///

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

+ ///

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

pub engine_mode: Option<String>, ///

Indicates the database engine version.

pub engine_version: Option, @@ -2808,11 +2830,17 @@ impl DBClusterDeserializer { Some(TStampDeserializer::deserialize("ClusterCreateTime", stack)?); } "CopyTagsToSnapshot" => { - obj.copy_tags_to_snapshot = Some(BooleanDeserializer::deserialize( + obj.copy_tags_to_snapshot = Some(BooleanOptionalDeserializer::deserialize( "CopyTagsToSnapshot", stack, )?); } + "CrossAccountClone" => { + obj.cross_account_clone = Some(BooleanOptionalDeserializer::deserialize( + "CrossAccountClone", + stack, + )?); + } "CustomEndpoints" => { obj.custom_endpoints.get_or_insert(vec![]).extend( StringListDeserializer::deserialize("CustomEndpoints", stack)?, @@ -2862,7 +2890,7 @@ impl DBClusterDeserializer { )?); } "DeletionProtection" => { - obj.deletion_protection = Some(BooleanDeserializer::deserialize( + obj.deletion_protection = Some(BooleanOptionalDeserializer::deserialize( "DeletionProtection", stack, )?); @@ -2905,14 +2933,14 @@ impl DBClusterDeserializer { Some(StringDeserializer::deserialize("HostedZoneId", stack)?); } "HttpEndpointEnabled" => { - obj.http_endpoint_enabled = Some(BooleanDeserializer::deserialize( + obj.http_endpoint_enabled = Some(BooleanOptionalDeserializer::deserialize( "HttpEndpointEnabled", stack, )?); } "IAMDatabaseAuthenticationEnabled" => { obj.iam_database_authentication_enabled = - Some(BooleanDeserializer::deserialize( + Some(BooleanOptionalDeserializer::deserialize( "IAMDatabaseAuthenticationEnabled", stack, )?); @@ -2931,7 +2959,8 @@ impl DBClusterDeserializer { Some(StringDeserializer::deserialize("MasterUsername", stack)?); } "MultiAZ" => { - obj.multi_az = Some(BooleanDeserializer::deserialize("MultiAZ", stack)?); + obj.multi_az = + Some(BooleanOptionalDeserializer::deserialize("MultiAZ", stack)?); } "PercentProgress" => { obj.percent_progress = @@ -3177,7 +3206,7 @@ impl DBClusterCapacityInfoDeserializer { ///

This data type represents the information you need to connect to an Amazon Aurora DB cluster. This data type is used as a response element in the following actions:

  • CreateDBClusterEndpoint

  • DescribeDBClusterEndpoints

  • ModifyDBClusterEndpoint

  • DeleteDBClusterEndpoint

For the data structure that represents Amazon RDS DB instance endpoints, see Endpoint.

#[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterEndpoint { - ///

The type associated with a custom endpoint. One of: READER, ANY.

+ ///

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

pub custom_endpoint_type: Option<String>, ///

The Amazon Resource Name (ARN) for the endpoint.

pub db_cluster_endpoint_arn: Option<String>, @@ -3347,7 +3376,7 @@ pub struct DBClusterMember { pub db_cluster_parameter_group_status: Option<String>, ///

Specifies the instance identifier for this member of the DB cluster.

pub db_instance_identifier: Option<String>, - ///

A value that indicates whehter the cluster member is the primary instance for the DB cluster.

+ ///

Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

pub is_cluster_writer: Option<bool>, ///

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

pub promotion_tier: Option<i64>, @@ -4299,6 +4328,8 @@ pub struct DBInstance { pub listener_endpoint: Option<Endpoint>, ///

Contains the master username for the DB instance.

pub master_username: Option<String>, + ///

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

+ pub max_allocated_storage: Option<i64>, ///

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

pub monitoring_interval: Option<i64>, ///

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

@@ -4528,6 +4559,12 @@ impl DBInstanceDeserializer { obj.master_username = Some(StringDeserializer::deserialize("MasterUsername", stack)?); } + "MaxAllocatedStorage" => { + obj.max_allocated_storage = Some(IntegerOptionalDeserializer::deserialize( + "MaxAllocatedStorage", + stack, + )?); + } "MonitoringInterval" => { obj.monitoring_interval = Some(IntegerOptionalDeserializer::deserialize( "MonitoringInterval", @@ -5194,7 +5231,7 @@ impl DBParameterGroupNameMessageDeserializer { ///

The status of the DB parameter group.

This data type is used as a response element in the following actions:

  • CreateDBInstance

  • CreateDBInstanceReadReplica

  • DeleteDBInstance

  • ModifyDBInstance

  • RebootDBInstance

  • RestoreDBInstanceFromDBSnapshot

#[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroupStatus { - ///

The name of the DP parameter group.

+ ///

The name of the DB parameter group.

pub db_parameter_group_name: Option<String>, ///

The status of parameter updates.

pub parameter_apply_status: Option<String>, @@ -6761,7 +6798,7 @@ pub struct DescribeDBClusterSnapshotsMessage { pub db_cluster_identifier: Option<String>, ///

A specific DB cluster snapshot identifier to describe. This parameter can't be used in conjunction with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

Constraints:

  • If supplied, must match the identifier of an existing DBClusterSnapshot.

  • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

pub db_cluster_snapshot_identifier: Option<String>, - ///

This parameter is not currently supported.

+ ///

A filter that specifies one or more DB cluster snapshots to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs).

  • db-cluster-snapshot-id - Accepts DB cluster snapshot identifiers.

  • snapshot-type - Accepts types of DB cluster snapshots.

  • engine - Accepts names of database engines.

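As a hedged sketch of the snapshot filters documented above (not part of the generated diff), the following lists manual DB cluster snapshots via the `snapshot-type` filter; the region and filter value are illustrative.

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeDBClusterSnapshotsMessage, Filter, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = DescribeDBClusterSnapshotsMessage {
        filters: Some(vec![Filter {
            name: "snapshot-type".to_owned(),
            values: vec!["manual".to_owned()],
        }]),
        ..Default::default()
    };
    match client.describe_db_cluster_snapshots(request).sync() {
        Ok(output) => {
            for snap in output.db_cluster_snapshots.unwrap_or_default() {
                println!("{:?}", snap.db_cluster_snapshot_identifier);
            }
        }
        Err(e) => eprintln!("describe_db_cluster_snapshots failed: {}", e),
    }
}
```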
pub filters: Option<Vec<Filter>>, ///

A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account. By default, the public snapshots are not included.

You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

pub include_public: Option<bool>, @@ -6828,6 +6865,8 @@ pub struct DescribeDBClustersMessage { pub db_cluster_identifier: Option<String>, ///

A filter that specifies one or more DB clusters to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.

pub filters: Option>, + ///

Optional Boolean parameter that specifies whether the output includes information about clusters shared from other AWS accounts.

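A minimal sketch (not from this patch) exercising the IncludeShared flag declared just below, together with the new CrossAccountClone response field added earlier in this diff; names are placeholders.

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeDBClustersMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = DescribeDBClustersMessage {
        // Also list clusters shared from other AWS accounts.
        include_shared: Some(true),
        ..Default::default()
    };
    match client.describe_db_clusters(request).sync() {
        Ok(output) => {
            for cluster in output.db_clusters.unwrap_or_default() {
                println!(
                    "{:?} cross-account clone: {:?}",
                    cluster.db_cluster_identifier, cluster.cross_account_clone
                );
            }
        }
        Err(e) => eprintln!("describe_db_clusters failed: {}", e),
    }
}
```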
+ pub include_shared: Option<bool>, ///

An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

pub marker: Option<String>, ///

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

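The Marker/MaxRecords pair above drives pagination. A hedged sketch of the usual loop, assuming this generation's blocking `.sync()` style; page size and region are arbitrary.

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeDBClustersMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let mut marker: Option<String> = None;
    loop {
        let request = DescribeDBClustersMessage {
            marker: marker.clone(),
            max_records: Some(20), // minimum page size; maximum is 100
            ..Default::default()
        };
        let page = client
            .describe_db_clusters(request)
            .sync()
            .expect("describe_db_clusters failed");
        for cluster in page.db_clusters.unwrap_or_default() {
            println!("{:?}", cluster.db_cluster_identifier);
        }
        // An absent marker means the last page has been returned.
        marker = page.marker;
        if marker.is_none() {
            break;
        }
    }
}
```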
@@ -6856,6 +6895,9 @@ impl DescribeDBClustersMessageSerializer { field_value, ); } + if let Some(ref field_value) = obj.include_shared { + params.put(&format!("{}{}", prefix, "IncludeShared"), &field_value); + } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } @@ -6998,7 +7040,7 @@ impl DescribeDBInstanceAutomatedBackupsMessageSerializer { pub struct DescribeDBInstancesMessage { ///

The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

Constraints:

  • If supplied, must match the identifier of an existing DBInstance.

pub db_instance_identifier: Option<String>, - ///

A filter that specifies one or more DB instances to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.

+ ///

A filter that specifies one or more DB instances to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.

  • dbi-resource-id - Accepts DB instance resource identifiers. The results list will only include information about the DB instances identified by these resource identifiers.

pub filters: Option<Vec<Filter>>, ///

An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

pub marker: Option<String>, @@ -7391,7 +7433,7 @@ pub struct DescribeDBSnapshotsMessage { pub db_snapshot_identifier: Option<String>, ///

A specific DB resource ID to describe.

pub dbi_resource_id: Option<String>, - ///

This parameter is not currently supported.

+ ///

A filter that specifies one or more DB snapshots to describe.

Supported filters:

  • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs).

  • db-snapshot-id - Accepts DB snapshot identifiers.

  • dbi-resource-id - Accepts identifiers of source DB instances.

  • snapshot-type - Accepts types of DB snapshots.

  • engine - Accepts names of database engines.

pub filters: Option<Vec<Filter>>, ///

A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account. By default, the public snapshots are not included.

You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API.

pub include_public: Option<bool>, @@ -9600,7 +9642,7 @@ impl ModifyCurrentDBClusterCapacityMessageSerializer { pub struct ModifyDBClusterEndpointMessage { ///

The identifier of the endpoint to modify. This parameter is stored as a lowercase string.

pub db_cluster_endpoint_identifier: String, - ///

The type of the endpoint. One of: READER, ANY.

+ ///

The type of the endpoint. One of: READER, WRITER, ANY.

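The doc text above newly admits WRITER as an endpoint type. A hedged sketch of switching a custom endpoint to it (not part of the generated diff); the endpoint identifier is a placeholder, and this assumes the operation returns the updated DBClusterEndpoint shape directly.

```rust
use rusoto_core::Region;
use rusoto_rds::{ModifyDBClusterEndpointMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = ModifyDBClusterEndpointMessage {
        db_cluster_endpoint_identifier: "example-custom-endpoint".to_owned(),
        // WRITER is newly documented as a valid type in this revision.
        endpoint_type: Some("WRITER".to_owned()),
        ..Default::default()
    };
    match client.modify_db_cluster_endpoint(request).sync() {
        Ok(endpoint) => println!("endpoint address: {:?}", endpoint.endpoint),
        Err(e) => eprintln!("modify_db_cluster_endpoint failed: {}", e),
    }
}
```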
pub endpoint_type: Option<String>, ///

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

pub excluded_members: Option<Vec<String>>, @@ -9644,6 +9686,8 @@ impl ModifyDBClusterEndpointMessageSerializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBClusterMessage { + ///

A value that indicates whether major version upgrades are allowed.

Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.

+ pub allow_major_version_upgrade: Option<bool>, ///

A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter only affects the EnableIAMDatabaseAuthentication, MasterUserPassword, and NewDBClusterIdentifier values. If the ApplyImmediately parameter is disabled, then changes to the EnableIAMDatabaseAuthentication, MasterUserPassword, and NewDBClusterIdentifier values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

By default, this parameter is disabled.

pub apply_immediately: Option<bool>, ///

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

@@ -9654,17 +9698,19 @@ pub struct ModifyDBClusterMessage { pub cloudwatch_logs_export_configuration: Option<CloudwatchLogsExportConfiguration>, ///

A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

pub copy_tags_to_snapshot: Option<bool>, - ///

The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

+ ///

The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

Constraints: This identifier must match the identifier of an existing DB cluster.

pub db_cluster_identifier: String, ///

The name of the DB cluster parameter group to use for the DB cluster.

pub db_cluster_parameter_group_name: Option<String>, + ///

The name of the DB parameter group to apply to all instances of the DB cluster.

When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes aren't applied during the next maintenance window but instead are applied immediately.

Default: The existing name setting

Constraints:

  • The DB parameter group must be in the same DB parameter group family as this DB cluster.

  • The DBInstanceParameterGroupName parameter is only valid in combination with the AllowMajorVersionUpgrade parameter.

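Tying together the two new ModifyDBCluster fields described above (AllowMajorVersionUpgrade and, declared just below, DBInstanceParameterGroupName): a hedged sketch of a cross-major Aurora upgrade, not part of the generated diff; the cluster identifier, target version, and group name are placeholders.

```rust
use rusoto_core::Region;
use rusoto_rds::{ModifyDBClusterMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = ModifyDBClusterMessage {
        db_cluster_identifier: "example-cluster".to_owned(),
        // A cross-major EngineVersion change requires AllowMajorVersionUpgrade,
        // and DBInstanceParameterGroupName is only valid in combination with it.
        allow_major_version_upgrade: Some(true),
        engine_version: Some("10.7".to_owned()),
        db_instance_parameter_group_name: Some("example-instance-params".to_owned()),
        apply_immediately: Some(true),
        ..Default::default()
    };
    match client.modify_db_cluster(request).sync() {
        Ok(output) => println!("{:?}", output.db_cluster),
        Err(e) => eprintln!("modify_db_cluster failed: {}", e),
    }
}
```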
+ pub db_instance_parameter_group_name: Option<String>, ///

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

pub deletion_protection: Option<bool>, ///

A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.

When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.

For more information, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.

pub enable_http_endpoint: Option<bool>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

pub enable_iam_database_authentication: Option<bool>, - ///

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

For a list of valid engine versions, use DescribeDBEngineVersions.

+ ///

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"

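The CLI invocations above have a direct equivalent in this crate. A hedged sketch (not part of the generated diff) listing Aurora PostgreSQL engine versions; swap the engine name as needed.

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeDBEngineVersionsMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = DescribeDBEngineVersionsMessage {
        engine: Some("aurora-postgresql".to_owned()),
        ..Default::default()
    };
    match client.describe_db_engine_versions(request).sync() {
        Ok(output) => {
            for version in output.db_engine_versions.unwrap_or_default() {
                println!("{:?}", version.engine_version);
            }
        }
        Err(e) => eprintln!("describe_db_engine_versions failed: {}", e),
    }
}
```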
pub engine_version: Option<String>, ///

The new password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".

Constraints: Must contain from 8 to 41 characters.

pub master_user_password: Option, @@ -9693,6 +9739,12 @@ impl ModifyDBClusterMessageSerializer { prefix.push_str("."); } + if let Some(ref field_value) = obj.allow_major_version_upgrade { + params.put( + &format!("{}{}", prefix, "AllowMajorVersionUpgrade"), + &field_value, + ); + } if let Some(ref field_value) = obj.apply_immediately { params.put(&format!("{}{}", prefix, "ApplyImmediately"), &field_value); } @@ -9725,6 +9777,12 @@ impl ModifyDBClusterMessageSerializer { &field_value, ); } + if let Some(ref field_value) = obj.db_instance_parameter_group_name { + params.put( + &format!("{}{}", prefix, "DBInstanceParameterGroupName"), + &field_value, + ); + } if let Some(ref field_value) = obj.deletion_protection { params.put(&format!("{}{}", prefix, "DeletionProtection"), &field_value); } @@ -9938,7 +9996,7 @@ pub struct ModifyDBInstanceMessage { pub db_instance_class: Option, ///

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

pub db_instance_identifier: String, - ///

The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The DB instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

Default: Uses existing setting

Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

+ ///

The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically and the parameter changes aren't applied during the next maintenance window.

Default: Uses existing setting

Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

pub db_parameter_group_name: Option<String>, ///

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

MySQL

Default: 3306

Valid Values: 1150-65535

MariaDB

Default: 3306

Valid Values: 1150-65535

PostgreSQL

Default: 5432

Valid Values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid Values: 1150-65535

SQL Server

Default: 1433

Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

Amazon Aurora

Default: 3306

Valid Values: 1150-65535

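As the port table above warns, changing DBPortNumber always restarts the instance. A hedged sketch of moving a MySQL instance off the default port (identifiers and port are placeholders, not from this patch):

```rust
use rusoto_core::Region;
use rusoto_rds::{ModifyDBInstanceMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = ModifyDBInstanceMessage {
        db_instance_identifier: "example-db".to_owned(),
        // Must be 1150-65535 and must not collide with option-group ports.
        db_port_number: Some(3307),
        ..Default::default()
    };
    // The database restarts when DBPortNumber changes, regardless of ApplyImmediately.
    match client.modify_db_instance(request).sync() {
        Ok(output) => println!("{:?}", output.db_instance),
        Err(e) => eprintln!("modify_db_instance failed: {}", e),
    }
}
```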
pub db_port_number: Option<i64>, @@ -9952,7 +10010,7 @@ pub struct ModifyDBInstanceMessage { pub domain: Option<String>, ///

The name of the IAM role to use when making API calls to the Directory Service.

pub domain_iam_role_name: Option<String>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see ModifyDBCluster.

MySQL

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

pub enable_performance_insights: Option<bool>, @@ -9964,6 +10022,8 @@ pub struct ModifyDBInstanceMessage { pub license_model: Option<String>, ///

The new password for the master user. The password can include any printable ASCII character except "/", """, or "@".

Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

pub master_user_password: Option<String>, + ///

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

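MaxAllocatedStorage, declared just below, is the headline addition of this update: it turns on storage autoscaling. A hedged sketch of enabling it on an existing instance (identifier and limit are placeholders):

```rust
use rusoto_core::Region;
use rusoto_rds::{ModifyDBInstanceMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = ModifyDBInstanceMessage {
        db_instance_identifier: "example-db".to_owned(),
        // Let RDS grow storage automatically, up to 1024 GiB.
        max_allocated_storage: Some(1024),
        ..Default::default()
    };
    match client.modify_db_instance(request).sync() {
        Ok(output) => println!(
            "new ceiling: {:?}",
            output.db_instance.and_then(|i| i.max_allocated_storage)
        ),
        Err(e) => eprintln!("modify_db_instance failed: {}", e),
    }
}
```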
+ pub max_allocated_storage: Option<i64>, ///

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

pub monitoring_interval: Option<i64>, ///

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

@@ -10108,6 +10168,12 @@ impl ModifyDBInstanceMessageSerializer { if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } + if let Some(ref field_value) = obj.max_allocated_storage { + params.put( + &format!("{}{}", prefix, "MaxAllocatedStorage"), + &field_value, + ); + } if let Some(ref field_value) = obj.monitoring_interval { params.put(&format!("{}{}", prefix, "MonitoringInterval"), &field_value); } @@ -10218,7 +10284,7 @@ impl ModifyDBInstanceResultDeserializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBParameterGroupMessage { - ///

The name of the DB parameter group.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

+ ///

The name of the DB parameter group.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

pub db_parameter_group_name: String, ///

An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

Valid Values (for the application method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.

pub parameters: Vec<Parameter>, @@ -11582,6 +11648,8 @@ pub struct OrderableDBInstanceOption { pub supports_iops: Option<bool>, ///

True if a DB instance supports Performance Insights, otherwise false.

pub supports_performance_insights: Option<bool>, + ///

Whether or not Amazon RDS can automatically scale storage for DB instances that use the specified instance class.

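To check the capability described above before setting MaxAllocatedStorage, a hedged sketch (not from this patch) that lists instance classes reporting storage-autoscaling support; the engine name is illustrative.

```rust
use rusoto_core::Region;
use rusoto_rds::{DescribeOrderableDBInstanceOptionsMessage, Rds, RdsClient};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = DescribeOrderableDBInstanceOptionsMessage {
        engine: "mysql".to_owned(),
        ..Default::default()
    };
    match client.describe_orderable_db_instance_options(request).sync() {
        Ok(output) => {
            for option in output.orderable_db_instance_options.unwrap_or_default() {
                // New field in this revision.
                if option.supports_storage_autoscaling == Some(true) {
                    println!("{:?}", option.db_instance_class);
                }
            }
        }
        Err(e) => eprintln!("describe_orderable_db_instance_options failed: {}", e),
    }
}
```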
+ pub supports_storage_autoscaling: Option<bool>, ///

Indicates whether a DB instance supports encrypted storage.

pub supports_storage_encryption: Option<bool>, ///

Indicates whether a DB instance is in a VPC.

@@ -11711,6 +11779,13 @@ impl OrderableDBInstanceOptionDeserializer { stack, )?); } + "SupportsStorageAutoscaling" => { + obj.supports_storage_autoscaling = + Some(BooleanOptionalDeserializer::deserialize( + "SupportsStorageAutoscaling", + stack, + )?); + } "SupportsStorageEncryption" => { obj.supports_storage_encryption = Some(BooleanDeserializer::deserialize( "SupportsStorageEncryption", @@ -11996,13 +12071,13 @@ impl PendingCloudwatchLogsExportsDeserializer { pub struct PendingMaintenanceAction { ///

The type of pending maintenance action that is available for the resource. Valid actions are system-update, db-upgrade, and hardware-maintenance.

pub action: Option<String>, - ///

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

+ ///

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date.

pub auto_applied_after_date: Option<String>, ///

The effective date when the pending maintenance action is applied to the resource. This date takes into account opt-in requests received from the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the ForcedApplyDate. This value is blank if an opt-in request has not been received and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate.

pub current_apply_date: Option<String>, ///

A description providing more detail about the maintenance action.

pub description: Option<String>, - ///

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

+ ///

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource.

pub forced_apply_date: Option<String>, ///

Indicates the type of opt-in request that has been received for the resource.

pub opt_in_status: Option<String>, @@ -12386,7 +12461,7 @@ impl PromoteReadReplicaDBClusterResultDeserializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct PromoteReadReplicaMessage { - ///

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

  • Must be a value from 0 to 8

+ ///

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

  • Must be a value from 0 to 35.

  • Can't be set to 0 if the DB instance is a source to Read Replicas.

pub backup_retention_period: Option<i64>, ///

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing Read Replica DB instance.

Example: mydbinstance

pub db_instance_identifier: String, @@ -13327,7 +13402,7 @@ impl ResetDBClusterParameterGroupMessageSerializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct ResetDBParameterGroupMessage { - ///

The name of the DB parameter group.

Constraints:

  • Must match the name of an existing DBParameterGroup.

+ ///

The name of the DB parameter group.

Constraints:

  • Must match the name of an existing DBParameterGroup.

pub db_parameter_group_name: String, ///

To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

MySQL

Valid Values (for Apply method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

MariaDB

Valid Values (for Apply method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

Oracle

Valid Values (for Apply method): pending-reboot

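Illustrating the apply-method rules above: a hedged sketch of resetting a single static parameter with pending-reboot (the group and parameter names are placeholders, not from this patch).

```rust
use rusoto_core::Region;
use rusoto_rds::{Parameter, Rds, RdsClient, ResetDBParameterGroupMessage};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = ResetDBParameterGroupMessage {
        db_parameter_group_name: "example-params".to_owned(),
        parameters: Some(vec![Parameter {
            parameter_name: Some("max_connections".to_owned()),
            // Static parameters only accept pending-reboot.
            apply_method: Some("pending-reboot".to_owned()),
            ..Default::default()
        }]),
        ..Default::default()
    };
    match client.reset_db_parameter_group(request).sync() {
        Ok(output) => println!("reset: {:?}", output.db_parameter_group_name),
        Err(e) => eprintln!("reset_db_parameter_group failed: {}", e),
    }
}
```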
pub parameters: Option<Vec<Parameter>>, @@ -13427,11 +13502,11 @@ pub struct RestoreDBClusterFromS3Message { pub deletion_protection: Option<bool>, ///

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The name of the database engine to be used for the restored DB cluster.

Valid Values: aurora, aurora-postgresql

pub engine: String, - ///

The version number of the database engine to use.

Aurora MySQL

Example: 5.6.10a

Aurora PostgreSQL

Example: 9.6.3

+ ///

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

pub engine_version: Option<String>, ///

The AWS KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If the StorageEncrypted parameter is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

pub kms_key_id: Option<String>, @@ -13641,13 +13716,13 @@ pub struct RestoreDBClusterFromSnapshotMessage { pub deletion_protection: Option<bool>, ///

The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The database engine to use for the new DB cluster.

Default: The same as source

Constraint: Must be compatible with the engine of the source

pub engine: String, - ///

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

+ ///

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

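Since the engine-mode list above now includes global and multimaster, here is a hedged sketch of restoring a cluster from a snapshot with an explicit mode (identifiers and mode are placeholders, not from this patch):

```rust
use rusoto_core::Region;
use rusoto_rds::{Rds, RdsClient, RestoreDBClusterFromSnapshotMessage};

fn main() {
    let client = RdsClient::new(Region::UsEast1);
    let request = RestoreDBClusterFromSnapshotMessage {
        db_cluster_identifier: "example-restored-cluster".to_owned(),
        snapshot_identifier: "example-cluster-snapshot".to_owned(),
        engine: "aurora".to_owned(),
        // Any of: provisioned, serverless, parallelquery, global, multimaster.
        engine_mode: Some("serverless".to_owned()),
        ..Default::default()
    };
    match client.restore_db_cluster_from_snapshot(request).sync() {
        Ok(output) => println!("{:?}", output.db_cluster),
        Err(e) => eprintln!("restore_db_cluster_from_snapshot failed: {}", e),
    }
}
```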
pub engine_mode: Option<String>, - ///

The version of the database engine to use for the new DB cluster.

+ ///

The version of the database engine to use for the new DB cluster.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

pub engine_version: Option<String>, ///

The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If you don't specify a value for the KmsKeyId parameter, then the following occurs:

  • If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB snapshot or DB cluster snapshot.

  • If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is not encrypted, then the restored DB cluster is not encrypted.

pub kms_key_id: Option<String>, @@ -13804,7 +13879,7 @@ pub struct RestoreDBClusterToPointInTimeMessage { pub deletion_protection: Option<bool>, ///

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.

If you don't specify a value for the KmsKeyId parameter, then the following occurs:

  • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.

  • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.

If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

pub kms_key_id: Option<String>, @@ -13950,7 +14025,7 @@ pub struct RestoreDBInstanceFromDBSnapshotMessage { pub db_instance_identifier: String, ///

The database name for the restored DB instance.

This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines.

pub db_name: Option<String>, - ///

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

+ ///

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

pub db_parameter_group_name: Option<String>, ///

The identifier for the DB snapshot to restore from.

Constraints:

  • Must match the identifier of an existing DBSnapshot.

  • If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

pub db_snapshot_identifier: String, @@ -13964,7 +14039,7 @@ pub struct RestoreDBInstanceFromDBSnapshotMessage { pub domain_iam_role_name: Option<String>, ///

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The database engine to use for the new instance.

Default: The same as source

Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot.

Valid Values:

  • mariadb

  • mysql

  • oracle-ee

  • oracle-se2

  • oracle-se1

  • oracle-se

  • postgres

  • sqlserver-ee

  • sqlserver-se

  • sqlserver-ex

  • sqlserver-web

pub engine: Option<String>, @@ -14166,7 +14241,7 @@ pub struct RestoreDBInstanceFromS3Message { pub db_instance_identifier: String, ///

The name of the database to create when the DB instance is created. Follow the naming rules specified in CreateDBInstance.

pub db_name: Option<String>, - ///

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default parameter group for the specified engine is used.

+ ///

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

pub db_parameter_group_name: Option<String>, ///

A list of DB security groups to associate with this DB instance.

Default: The default DB security group for the database engine.

pub db_security_groups: Option<Vec<String>>, @@ -14176,7 +14251,7 @@ pub struct RestoreDBInstanceFromS3Message { pub deletion_protection: Option<bool>, ///

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

pub enable_performance_insights: Option<bool>, @@ -14184,7 +14259,7 @@ pub struct RestoreDBInstanceFromS3Message { pub engine: String, ///

The version number of the database engine to use. Choose the latest minor version of your database engine. For information about engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

pub engine_version: Option<String>, - ///

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

+ ///

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

pub iops: Option<i64>, ///

The AWS KMS key identifier for an encrypted DB instance.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If the StorageEncrypted parameter is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

pub kms_key_id: Option<String>, @@ -14467,7 +14542,7 @@ pub struct RestoreDBInstanceToPointInTimeMessage { pub db_instance_class: Option<String>, ///

The database name for the restored DB instance.

This parameter is not used for the MySQL or MariaDB engines.

pub db_name: Option<String>, - ///

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

+ ///

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

pub db_parameter_group_name: Option<String>, ///

The DB subnet group name to use for the new instance.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

pub db_subnet_group_name: Option<String>, @@ -14479,7 +14554,7 @@ pub struct RestoreDBInstanceToPointInTimeMessage { pub domain_iam_role_name: Option<String>, ///

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

pub enable_cloudwatch_logs_exports: Option<Vec<String>>, - ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

+ ///

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

pub enable_iam_database_authentication: Option<bool>, ///

The database engine to use for the new instance.

Default: The same as source

Constraint: Must be compatible with the engine of the source

Valid Values:

  • mariadb

  • mysql

  • oracle-ee

  • oracle-se2

  • oracle-se1

  • oracle-se

  • postgres

  • sqlserver-ee

  • sqlserver-se

  • sqlserver-ex

  • sqlserver-web

pub engine: Option<String>, @@ -15756,6 +15831,8 @@ pub struct ValidStorageOptions { pub storage_size: Option<Vec<Range>>, ///

The valid storage types for your DB instance. For example, gp2, io1.

pub storage_type: Option<String>, + ///

Whether or not Amazon RDS can automatically scale storage for DB instances that use the new instance class.

+ pub supports_storage_autoscaling: Option, } struct ValidStorageOptionsDeserializer; @@ -15785,6 +15862,12 @@ impl ValidStorageOptionsDeserializer { "StorageType" => { obj.storage_type = Some(StringDeserializer::deserialize("StorageType", stack)?); } + "SupportsStorageAutoscaling" => { + obj.supports_storage_autoscaling = Some(BooleanDeserializer::deserialize( + "SupportsStorageAutoscaling", + stack, + )?); + } _ => skip_tree(stack), } Ok(()) @@ -25129,10 +25212,7 @@ impl RdsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> RdsClient { - RdsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -25146,10 +25226,14 @@ impl RdsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - RdsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> RdsClient { + RdsClient { client, region } } } @@ -25241,7 +25325,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25317,7 +25401,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25368,7 +25452,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25419,7 +25503,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25467,7 +25551,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25515,7 +25599,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25565,7 +25649,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25616,7 +25700,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25667,7 +25751,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25718,7 +25802,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25766,7 +25850,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25814,7 +25898,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25862,7 +25946,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25913,7 +25997,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -25961,7 +26045,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26009,7 +26093,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26057,7 +26141,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26108,7 +26192,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26158,7 +26242,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26206,7 +26290,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26256,7 +26340,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26307,7 +26391,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26358,7 +26442,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26406,7 +26490,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26479,7 +26563,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26530,7 +26614,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26581,7 +26665,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26682,7 +26766,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26757,7 +26841,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26807,7 +26891,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26883,7 +26967,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26933,7 +27017,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -26981,7 +27065,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27029,7 +27113,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27079,7 +27163,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27127,7 +27211,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27180,7 +27264,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27228,7 +27312,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27279,7 +27363,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27327,7 +27411,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27378,7 +27462,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27428,7 +27512,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27479,7 +27563,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27527,7 +27611,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27577,7 +27661,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27625,7 +27709,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27673,7 +27757,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27723,7 +27807,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27771,7 +27855,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27824,7 +27908,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27875,7 +27959,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27923,7 +28007,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -27971,7 +28055,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28022,7 +28106,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28068,7 +28152,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28116,7 +28200,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28166,7 +28250,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28217,7 +28301,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28268,7 +28352,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28316,7 +28400,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28367,7 +28451,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28415,7 +28499,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28468,7 +28552,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28516,7 +28600,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28567,7 +28651,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28617,7 +28701,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28665,7 +28749,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28716,7 +28800,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28764,7 +28848,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28812,7 +28896,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28863,7 +28947,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28914,7 +28998,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -28962,7 +29046,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29013,7 +29097,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29061,7 +29145,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29111,7 +29195,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29159,7 +29243,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29209,7 +29293,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29260,7 +29344,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29311,7 +29395,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29359,7 +29443,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29412,7 +29496,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29463,7 +29547,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29511,7 +29595,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29614,7 +29698,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29687,7 +29771,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29735,7 +29819,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29783,7 +29867,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29831,7 +29915,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29879,7 +29963,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29930,7 +30014,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -29978,7 +30062,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30027,7 +30111,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30075,7 +30159,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30125,7 +30209,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30176,7 +30260,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30227,7 +30311,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30278,7 +30362,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30329,7 +30413,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -30380,7 +30464,7 @@ impl Rds for RdsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/redshift/Cargo.toml b/rusoto/services/redshift/Cargo.toml index 2e5b0ddd16e..74a20346163 100644 --- a/rusoto/services/redshift/Cargo.toml +++ b/rusoto/services/redshift/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_redshift" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/redshift/README.md b/rusoto/services/redshift/README.md index 5b7a65b9e41..b08ae201990 100644 --- a/rusoto/services/redshift/README.md +++ b/rusoto/services/redshift/README.md @@ -23,9 +23,16 @@ To use `rusoto_redshift` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_redshift = "0.40.0" +rusoto_redshift = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
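The long runs of hunks above and below all make one mechanical change: every generated XML response parser now builds its `EventReader` with `trim_whitespace(false)`, so significant whitespace inside XML text nodes survives deserialization. A minimal sketch of the effect, assuming only the `xml-rs` crate these services already depend on (the sample document is illustrative):

```rust
use xml::reader::{EventReader, ParserConfig, XmlEvent};

fn main() {
    let body = r#"<Value>  padded text  </Value>"#;
    let reader = EventReader::new_with_config(
        body.as_bytes(),
        // The flag this diff flips: with `true`, "  padded text  " comes
        // back as "padded text"; with `false`, the spaces are preserved.
        ParserConfig::new().trim_whitespace(false),
    );
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            println!("{:?}", text); // prints "  padded text  "
        }
    }
}
```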
diff --git a/rusoto/services/redshift/src/custom/mod.rs b/rusoto/services/redshift/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/redshift/src/custom/mod.rs
+++ b/rusoto/services/redshift/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/redshift/src/generated.rs b/rusoto/services/redshift/src/generated.rs
index 5c37529075a..d6d5d5b8b79 100644
--- a/rusoto/services/redshift/src/generated.rs
+++ b/rusoto/services/redshift/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
@@ -731,6 +730,8 @@ pub struct Cluster {
     pub automated_snapshot_retention_period: Option<i64>,
     /// <p>The name of the Availability Zone in which the cluster is located.</p>
     pub availability_zone: Option<String>,
+    /// <p>The availability status of the cluster for queries. Possible values are the following:</p> <ul> <li> <p>Available - The cluster is available for queries.</p> </li> <li> <p>Unavailable - The cluster is not available for queries.</p> </li> <li> <p>Maintenance - The cluster is intermittently available for queries due to maintenance activities.</p> </li> <li> <p>Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.</p> </li> <li> <p>Failed - The cluster failed and is not available for queries.</p> </li> </ul>
+    pub cluster_availability_status: Option<String>,
     /// <p>The date and time that the cluster was created.</p>
     pub cluster_create_time: Option<String>,
     /// <p>The unique identifier of the cluster.</p>
@@ -769,6 +770,10 @@ pub struct Cluster {
     pub endpoint: Option<Endpoint>,
     /// <p>An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.</p> <p>If this option is true, enhanced VPC routing is enabled.</p> <p>Default: false</p>
     pub enhanced_vpc_routing: Option<bool>,
+    /// <p>The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.</p>
+    pub expected_next_snapshot_schedule_time: Option<String>,
+    /// <p>The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:</p> <ul> <li> <p>OnTrack - The next snapshot is expected to be taken on time.</p> </li> <li> <p>Pending - The next snapshot is pending to be taken.</p> </li> </ul>
+    pub expected_next_snapshot_schedule_time_status: Option<String>,
     /// <p>A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.</p> <p>Values: active, applying</p>
     pub hsm_status: Option<HsmStatus>,
     /// <p>A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.</p>
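With `cluster_availability_status` and the two snapshot-schedule fields added to `Cluster` above, a downstream caller can inspect cluster availability directly. A hypothetical sketch; the `DescribeClusters` call, region, and error handling are illustrative and not part of this patch:

```rust
use rusoto_core::Region;
use rusoto_redshift::{DescribeClustersMessage, Redshift, RedshiftClient};

fn print_availability() {
    let client = RedshiftClient::new(Region::UsEast1);
    // Blocking call for brevity; real code would drive the future.
    let resp = client
        .describe_clusters(DescribeClustersMessage::default())
        .sync()
        .expect("DescribeClusters failed");
    for cluster in resp.clusters.unwrap_or_default() {
        println!(
            "{:?}: availability={:?}, next snapshot expected at {:?}",
            cluster.cluster_identifier,
            cluster.cluster_availability_status,          // new in this patch
            cluster.expected_next_snapshot_schedule_time, // new in this patch
        );
    }
}
```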
@@ -837,6 +842,12 @@ impl ClusterDeserializer {
                     obj.availability_zone =
                         Some(StringDeserializer::deserialize("AvailabilityZone", stack)?);
                 }
+                "ClusterAvailabilityStatus" => {
+                    obj.cluster_availability_status = Some(StringDeserializer::deserialize(
+                        "ClusterAvailabilityStatus",
+                        stack,
+                    )?);
+                }
                 "ClusterCreateTime" => {
                     obj.cluster_create_time =
                         Some(TStampDeserializer::deserialize("ClusterCreateTime", stack)?);
@@ -938,6 +949,18 @@ impl ClusterDeserializer {
                         stack,
                     )?);
                 }
+                "ExpectedNextSnapshotScheduleTime" => {
+                    obj.expected_next_snapshot_schedule_time = Some(
+                        TStampDeserializer::deserialize("ExpectedNextSnapshotScheduleTime", stack)?,
+                    );
+                }
+                "ExpectedNextSnapshotScheduleTimeStatus" => {
+                    obj.expected_next_snapshot_schedule_time_status =
+                        Some(StringDeserializer::deserialize(
+                            "ExpectedNextSnapshotScheduleTimeStatus",
+                            stack,
+                        )?);
+                }
                 "HsmStatus" => {
                     obj.hsm_status = Some(HsmStatusDeserializer::deserialize("HsmStatus", stack)?);
                 }
@@ -7868,7 +7891,7 @@ pub struct ResizeClusterMessage {
     pub cluster_identifier: String,
     /// <p>The new cluster type for the specified cluster.</p>
     pub cluster_type: Option<String>,
-    /// <p>The new node type for the nodes you are adding.</p>
+    /// <p>The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.</p>
     pub node_type: Option<String>,
     /// <p>The new number of nodes for the cluster.</p>
     pub number_of_nodes: i64,
@@ -15747,6 +15770,8 @@ pub enum RestoreFromClusterSnapshotError {
     InvalidRestoreFault(String),
     /// <p>The requested subnet is not valid, or not all of the subnets are in the same VPC.</p>
     InvalidSubnet(String),
+    /// <p>The tag is invalid.</p>
+    InvalidTagFault(String),
     /// <p>The cluster subnet group does not cover all Availability Zones.</p>
     InvalidVPCNetworkStateFault(String),
     /// <p>The encryption key has exceeded its grant limit in AWS KMS.</p>
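`RestoreFromClusterSnapshotError` gains `InvalidTagFault` above and `TagLimitExceededFault` just below, so callers can match on the tag failures explicitly. A hedged sketch; the identifiers and the blocking `.sync()` call are illustrative only:

```rust
use rusoto_core::RusotoError;
use rusoto_redshift::{
    Redshift, RedshiftClient, RestoreFromClusterSnapshotError, RestoreFromClusterSnapshotMessage,
};

fn restore(client: &RedshiftClient) {
    let input = RestoreFromClusterSnapshotMessage {
        cluster_identifier: "my-restored-cluster".to_string(),
        snapshot_identifier: "my-snapshot".to_string(),
        ..Default::default()
    };
    match client.restore_from_cluster_snapshot(input).sync() {
        // The two variants introduced by this patch:
        Err(RusotoError::Service(RestoreFromClusterSnapshotError::InvalidTagFault(msg))) => {
            eprintln!("invalid tag: {}", msg)
        }
        Err(RusotoError::Service(RestoreFromClusterSnapshotError::TagLimitExceededFault(msg))) => {
            eprintln!("too many tags: {}", msg)
        }
        Err(e) => eprintln!("restore failed: {}", e),
        Ok(_) => println!("restore started"),
    }
}
```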
@@ -15757,6 +15782,8 @@ pub enum RestoreFromClusterSnapshotError {
     NumberOfNodesQuotaExceededFault(String),
     /// <p>We could not find the specified snapshot schedule.</p>
     SnapshotScheduleNotFoundFault(String),
+    /// <p>You have exceeded the number of tags allowed.</p>
+    TagLimitExceededFault(String),
     /// <p>Your account is not authorized to perform the requested operation.</p>
     UnauthorizedOperation(String),
 }
@@ -15888,6 +15915,11 @@ impl RestoreFromClusterSnapshotError {
                         RestoreFromClusterSnapshotError::InvalidSubnet(parsed_error.message),
                     )
                 }
+                "InvalidTagFault" => {
+                    return RusotoError::Service(
+                        RestoreFromClusterSnapshotError::InvalidTagFault(parsed_error.message),
+                    )
+                }
                 "InvalidVPCNetworkStateFault" => {
                     return RusotoError::Service(
                         RestoreFromClusterSnapshotError::InvalidVPCNetworkStateFault(
@@ -15921,6 +15953,13 @@ impl RestoreFromClusterSnapshotError {
                         ),
                     )
                 }
+                "TagLimitExceededFault" => {
+                    return RusotoError::Service(
+                        RestoreFromClusterSnapshotError::TagLimitExceededFault(
+                            parsed_error.message,
+                        ),
+                    )
+                }
                 "UnauthorizedOperation" => {
                     return RusotoError::Service(
                         RestoreFromClusterSnapshotError::UnauthorizedOperation(
@@ -15972,6 +16011,7 @@ impl Error for RestoreFromClusterSnapshotError {
             RestoreFromClusterSnapshotError::InvalidElasticIpFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::InvalidRestoreFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::InvalidSubnet(ref cause) => cause,
+            RestoreFromClusterSnapshotError::InvalidTagFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::InvalidVPCNetworkStateFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::LimitExceededFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::NumberOfNodesPerClusterLimitExceededFault(
@@ -15979,6 +16019,7 @@
             ) => cause,
             RestoreFromClusterSnapshotError::NumberOfNodesQuotaExceededFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::SnapshotScheduleNotFoundFault(ref cause) => cause,
+            RestoreFromClusterSnapshotError::TagLimitExceededFault(ref cause) => cause,
             RestoreFromClusterSnapshotError::UnauthorizedOperation(ref cause) => cause,
         }
     }
@@ -16552,7 +16593,7 @@ pub trait Redshift {
         input: DescribeSnapshotSchedulesMessage,
     ) -> RusotoFuture<DescribeSnapshotSchedulesOutputMessage, DescribeSnapshotSchedulesError>;

-    /// <p>Returns the total amount of snapshot usage and provisioned storage for a user in megabytes.</p>
+    /// <p>Returns the total amount of snapshot usage and provisioned storage in megabytes.</p>
     fn describe_storage(&self) -> RusotoFuture<CustomerStorageMessage, DescribeStorageError>;

     /// <p>Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the TableRestoreRequestId parameter, then DescribeTableRestoreStatus returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId.</p>
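The constructor hunk that follows deduplicates `new` and `new_with` behind a new public `new_with_client`. A sketch of how a caller might use it to hand an explicitly obtained `Client` to several service clients (the sharing scenario is hypothetical):

```rust
use rusoto_core::{Client, Region};
use rusoto_redshift::RedshiftClient;

fn make_clients() -> (RedshiftClient, RedshiftClient) {
    // `Client::shared()` hands out handles to one lazily initialized
    // dispatcher, so both service clients reuse the same connection pool.
    (
        RedshiftClient::new_with_client(Client::shared(), Region::UsEast1),
        RedshiftClient::new_with_client(Client::shared(), Region::EuWest1),
    )
}
```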
@@ -16738,10 +16779,7 @@ impl RedshiftClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> RedshiftClient { - RedshiftClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -16755,10 +16793,14 @@ impl RedshiftClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - RedshiftClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> RedshiftClient { + RedshiftClient { client, region } } } @@ -16793,7 +16835,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -16846,7 +16888,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -16894,7 +16936,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -16942,7 +16984,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -16991,7 +17033,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17042,7 +17084,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17092,7 +17134,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17143,7 +17185,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17191,7 +17233,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17239,7 +17281,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17287,7 +17329,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17335,7 +17377,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17383,7 +17425,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17431,7 +17473,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17479,7 +17521,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17527,7 +17569,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17575,7 +17617,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17651,7 +17693,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17749,7 +17791,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -17972,7 +18014,7 @@ impl Redshift for RedshiftClient { } 
else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18020,7 +18062,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18068,7 +18110,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18116,7 +18158,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18164,7 +18206,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18212,7 +18254,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18260,7 +18302,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18308,7 +18350,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18356,7 +18398,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18407,7 +18449,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18458,7 +18500,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next(); @@ -18506,7 +18548,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18554,7 +18596,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18605,7 +18647,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18651,7 +18693,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18699,7 +18741,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18747,7 +18789,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18797,7 +18839,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18845,7 +18887,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18893,7 +18935,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18944,7 +18986,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18992,7 +19034,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut 
stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19040,7 +19082,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19059,7 +19101,7 @@ impl Redshift for RedshiftClient { }) } - /// <p>Returns the total amount of snapshot usage and provisioned storage for a user in megabytes.</p> + /// <p>Returns the total amount of snapshot usage and provisioned storage in megabytes.</p>
fn describe_storage(&self) -> RusotoFuture { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let mut params = Params::new(); @@ -19088,7 +19130,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19136,7 +19178,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19187,7 +19229,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19238,7 +19280,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19286,7 +19328,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19337,7 +19379,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19386,7 +19428,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19434,7 +19476,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19487,7 +19529,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19539,7 +19581,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19587,7 +19629,7 @@ impl Redshift for RedshiftClient { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19635,7 +19677,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19683,7 +19725,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19731,7 +19773,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19779,7 +19821,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19852,7 +19894,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19900,7 +19942,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19951,7 +19993,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19999,7 +20041,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20047,7 +20089,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20098,7 +20140,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ 
-20146,7 +20188,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20197,7 +20239,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20245,7 +20287,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20296,7 +20338,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20347,7 +20389,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20397,7 +20439,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20447,7 +20489,7 @@ impl Redshift for RedshiftClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/rekognition/Cargo.toml b/rusoto/services/rekognition/Cargo.toml index 89900e70b3f..75ef38f3b63 100644 --- a/rusoto/services/rekognition/Cargo.toml +++ b/rusoto/services/rekognition/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_rekognition" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/rekognition/README.md b/rusoto/services/rekognition/README.md index 4f8e336557a..fc12ed9d5e9 100644 --- a/rusoto/services/rekognition/README.md +++ b/rusoto/services/rekognition/README.md @@ -23,9 +23,16 @@ To use `rusoto_rekognition` in your application, add it as a dependency in your ```toml [dependencies] 
-rusoto_rekognition = "0.40.0"
+rusoto_rekognition = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/rekognition/src/custom/mod.rs b/rusoto/services/rekognition/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/rekognition/src/custom/mod.rs
+++ b/rusoto/services/rekognition/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/rekognition/src/generated.rs b/rusoto/services/rekognition/src/generated.rs
index 3803c981461..0298932af2e 100644
--- a/rusoto/services/rekognition/src/generated.rs
+++ b/rusoto/services/rekognition/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
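The new `serialize_structs` feature widens the old test-only `cfg_attr(test, derive(Serialize))` gate to `cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))`, so downstream crates can opt in to `Serialize` on the output structs. A minimal sketch of what that enables, assuming the feature is enabled via `rusoto_rekognition = { version = "0.41.0", features = ["serialize_structs"] }` and `serde_json` is a dependency; the struct chosen here is just an example:

```rust
use rusoto_rekognition::DetectLabelsResponse;

fn main() {
    // With `serialize_structs` enabled the response types derive Serialize,
    // so they can be dumped as JSON for logging or snapshot testing.
    let response = DetectLabelsResponse::default();
    let json = serde_json::to_string_pretty(&response).expect("serialization failed");
    println!("{}", json);
}
```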
 /// Structure containing the estimated age range, in years, for a face.
 ///
 /// Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AgeRange {

The highest estimated age.

#[serde(rename = "High")] @@ -40,7 +39,7 @@ pub struct AgeRange { ///

Indicates whether or not the face has a beard, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Beard { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -54,7 +53,7 @@ pub struct Beard { ///

Identifies the bounding box around the label, face, or text. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.

The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.
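Because the box is expressed as ratios rather than pixels, callers typically multiply back by the image dimensions to recover pixel coordinates. A small illustrative helper (ours, not part of the crate) reproducing the arithmetic from the paragraphs above:

```rust
/// Convert a ratio-based bounding box into pixel coordinates.
/// Illustrative only; this helper is not part of rusoto_rekognition.
fn to_pixels(left: f32, top: f32, width: f32, height: f32, img_w: f32, img_h: f32) -> (f32, f32, f32, f32) {
    (left * img_w, top * img_h, width * img_w, height * img_h)
}

fn main() {
    // A 700x200 image with left = 0.5, top = 0.25, width = 0.1, as in the docs:
    let (x, y, w, _h) = to_pixels(0.5, 0.25, 0.1, 0.2, 700.0, 200.0);
    assert_eq!((x, y, w), (350.0, 50.0, 70.0));
    println!("box starts at ({}, {}) and is {} px wide", x, y, w);
}
```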

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BoundingBox { ///

Height of the bounding box as a ratio of the overall image height.

#[serde(rename = "Height")] @@ -76,7 +75,7 @@ pub struct BoundingBox { ///

Provides information about a celebrity recognized by the RecognizeCelebrities operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Celebrity { ///

Provides information about the celebrity's face, such as its location on the image.

#[serde(rename = "Face")] @@ -102,7 +101,7 @@ pub struct Celebrity { ///

Information about a recognized celebrity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CelebrityDetail { ///

Bounding box around the body of a celebrity.

#[serde(rename = "BoundingBox")] @@ -132,7 +131,7 @@ pub struct CelebrityDetail { ///

Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CelebrityRecognition { ///

Information about a recognized celebrity.

#[serde(rename = "Celebrity")] @@ -146,7 +145,7 @@ pub struct CelebrityRecognition { ///

Provides information about a face in a target image that matches the source image face analyzed by CompareFaces. The Face property contains the bounding box of the face in the target image. The Similarity property is the confidence that the source image face matches the face in the bounding box.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompareFacesMatch { ///

Provides face metadata (bounding box and confidence that the bounding box actually contains a face).

#[serde(rename = "Face")] @@ -173,7 +172,7 @@ pub struct CompareFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompareFacesResponse { ///

An array of faces in the target image that match the source image face. Each CompareFacesMatch object provides the bounding box, the confidence level that the bounding box contains a face, and the similarity score for the face in the bounding box and the face in the source image.

#[serde(rename = "FaceMatches")] @@ -199,7 +198,7 @@ pub struct CompareFacesResponse { ///

Provides face metadata for target image faces that are analyzed by CompareFaces and RecognizeCelebrities.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComparedFace { ///

Bounding box of the face.

#[serde(rename = "BoundingBox")] @@ -225,7 +224,7 @@ pub struct ComparedFace { ///

Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComparedSourceImageFace { ///

Bounding box of the face.

#[serde(rename = "BoundingBox")] @@ -237,15 +236,15 @@ pub struct ComparedSourceImageFace { pub confidence: Option, } -///

Information about a moderation label detection in a stored video.

+///

Information about an unsafe content label detection in a stored video.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContentModerationDetection { - ///

The moderation label detected by in the stored video.

+ ///

The unsafe content label detected by in the stored video.

#[serde(rename = "ModerationLabel")] #[serde(skip_serializing_if = "Option::is_none")] pub moderation_label: Option, - ///

Time, in milliseconds from the beginning of the video, that the moderation label was detected.

+ ///

Time, in milliseconds from the beginning of the video, that the unsafe content label was detected.

#[serde(rename = "Timestamp")] #[serde(skip_serializing_if = "Option::is_none")] pub timestamp: Option, @@ -259,7 +258,7 @@ pub struct CreateCollectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCollectionResponse { ///

Amazon Resource Name (ARN) of the collection. You can use this to manage permissions on your resources.

#[serde(rename = "CollectionArn")] @@ -295,7 +294,7 @@ pub struct CreateStreamProcessorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStreamProcessorResponse { ///

ARN for the newly created stream processor.

#[serde(rename = "StreamProcessorArn")] @@ -311,7 +310,7 @@ pub struct DeleteCollectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCollectionResponse { ///

HTTP status code that indicates the result of the operation.

#[serde(rename = "StatusCode")] @@ -330,7 +329,7 @@ pub struct DeleteFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFacesResponse { ///

An array of strings (face IDs) of the faces that were deleted.

#[serde(rename = "DeletedFaces")] @@ -346,7 +345,7 @@ pub struct DeleteStreamProcessorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteStreamProcessorResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -357,7 +356,7 @@ pub struct DescribeCollectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCollectionResponse { ///

The Amazon Resource Name (ARN) of the collection.

#[serde(rename = "CollectionARN")] @@ -385,7 +384,7 @@ pub struct DescribeStreamProcessorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStreamProcessorResponse { ///

Date and time the stream processor was created

#[serde(rename = "CreationTimestamp")] @@ -441,7 +440,7 @@ pub struct DetectFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectFacesResponse { ///

Details of each face found in the image.

#[serde(rename = "FaceDetails")] @@ -469,7 +468,7 @@ pub struct DetectLabelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectLabelsResponse { ///

Version number of the label detection model that was used to detect labels.

#[serde(rename = "LabelModelVersion")] @@ -497,9 +496,9 @@ pub struct DetectModerationLabelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectModerationLabelsResponse { - ///

Array of detected Moderation labels and the time, in millseconds from the start of the video, they were detected.

+ ///

Array of detected Moderation labels and the time, in milliseconds from the start of the video, they were detected.

#[serde(rename = "ModerationLabels")] #[serde(skip_serializing_if = "Option::is_none")] pub moderation_labels: Option>, @@ -517,7 +516,7 @@ pub struct DetectTextRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetectTextResponse { ///

An array of text that was detected in the input image.

#[serde(rename = "TextDetections")] @@ -525,9 +524,9 @@ pub struct DetectTextResponse { pub text_detections: Option>, } -///

The emotions detected on the face, and the confidence level in the determination. For example, HAPPY, SAD, and ANGRY.

+///

The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Emotion { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -541,7 +540,7 @@ pub struct Emotion { ///

Indicates whether or not the eyes on the face are open, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EyeOpen { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -555,7 +554,7 @@ pub struct EyeOpen { ///

Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Eyeglasses { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -569,7 +568,7 @@ pub struct Eyeglasses { ///

Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Face { ///

Bounding box of the face.

#[serde(rename = "BoundingBox")] @@ -595,7 +594,7 @@ pub struct Face { ///

Structure containing attributes of the face that the algorithm detected.

A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.

GetFaceDetection is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.

  • GetCelebrityRecognition

  • GetPersonTracking

  • GetFaceSearch

The Amazon Rekognition Image DetectFaces and IndexFaces operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaceDetail { ///

The estimated age range, in years, for the face. Low represents the lowest estimated age and High represents the highest estimated age.

#[serde(rename = "AgeRange")] @@ -613,7 +612,7 @@ pub struct FaceDetail { #[serde(rename = "Confidence")] #[serde(skip_serializing_if = "Option::is_none")] pub confidence: Option, - ///

The emotions detected on the face, and the confidence level in the determination. For example, HAPPY, SAD, and ANGRY.

+ ///

The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

#[serde(rename = "Emotions")] #[serde(skip_serializing_if = "Option::is_none")] pub emotions: Option>, @@ -661,7 +660,7 @@ pub struct FaceDetail { ///

Information about a face detected in a video analysis request and the time the face was detected in the video.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaceDetection { ///

The face properties for the detected face.

#[serde(rename = "Face")] @@ -675,7 +674,7 @@ pub struct FaceDetection { ///

Provides face metadata. In addition, it also provides the confidence in the match of this face with the input face.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaceMatch { ///

Describes the face properties such as the bounding box, face ID, image ID of the source image, and external image ID that you assigned.

#[serde(rename = "Face")] @@ -689,7 +688,7 @@ pub struct FaceMatch { ///

Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaceRecord { ///

Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.

#[serde(rename = "Face")] @@ -716,7 +715,7 @@ pub struct FaceSearchSettings { ///

Gender of the face and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Gender { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -730,7 +729,7 @@ pub struct Gender { ///

Information about where the text detected by DetectText is located on an image.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Geometry { ///

An axis-aligned coarse representation of the detected text's location on the image.

#[serde(rename = "BoundingBox")] @@ -750,7 +749,7 @@ pub struct GetCelebrityInfoRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCelebrityInfoResponse { ///

The name of the celebrity.

#[serde(rename = "Name")] @@ -782,7 +781,7 @@ pub struct GetCelebrityRecognitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCelebrityRecognitionResponse { ///

Array of celebrities recognized in the video.

#[serde(rename = "Celebrities")] @@ -808,14 +807,14 @@ pub struct GetCelebrityRecognitionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetContentModerationRequest { - ///

The identifier for the content moderation job. Use JobId to identify the job in a subsequent call to GetContentModeration.

+ ///

The identifier for the unsafe content job. Use JobId to identify the job in a subsequent call to GetContentModeration.

#[serde(rename = "JobId")] pub job_id: String, ///

Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.

#[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option, - ///

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of content moderation labels.

+ ///

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of unsafe content labels.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, @@ -826,13 +825,13 @@ pub struct GetContentModerationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetContentModerationResponse { - ///

The current status of the content moderation job.

+ ///

The current status of the unsafe content analysis job.

#[serde(rename = "JobStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub job_status: Option, - ///

The detected moderation labels and the time(s) they were detected.

+ ///

The detected unsafe content labels and the time(s) they were detected.

#[serde(rename = "ModerationLabels")] #[serde(skip_serializing_if = "Option::is_none")] pub moderation_labels: Option>, @@ -840,7 +839,7 @@ pub struct GetContentModerationResponse { #[serde(rename = "ModerationModelVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub moderation_model_version: Option, - ///

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of moderation labels.

+ ///

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of unsafe content labels.

#[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, @@ -870,7 +869,7 @@ pub struct GetFaceDetectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFaceDetectionResponse { ///

An array of faces detected in the video. Each element contains a detected face's details and the time, in milliseconds from the start of the video, the face was detected.

#[serde(rename = "Faces")] @@ -914,7 +913,7 @@ pub struct GetFaceSearchRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFaceSearchResponse { ///

The current status of the face search job.

#[serde(rename = "JobStatus")] @@ -958,7 +957,7 @@ pub struct GetLabelDetectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLabelDetectionResponse { ///

The current status of the label detection job.

#[serde(rename = "JobStatus")] @@ -1006,7 +1005,7 @@ pub struct GetPersonTrackingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPersonTrackingResponse { ///

The current status of the person tracking job.

#[serde(rename = "JobStatus")] @@ -1050,7 +1049,7 @@ pub struct Image { ///

Identifies face image brightness and sharpness.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImageQuality { ///

Value representing brightness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a brighter face image.

#[serde(rename = "Brightness")] @@ -1089,7 +1088,7 @@ pub struct IndexFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IndexFacesResponse { ///

The version number of the face detection model that's associated with the input collection (CollectionId).

#[serde(rename = "FaceModelVersion")] @@ -1111,7 +1110,7 @@ pub struct IndexFacesResponse { ///

An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Instance { ///

The position of the label instance on the image.

#[serde(rename = "BoundingBox")] @@ -1143,7 +1142,7 @@ pub struct KinesisVideoStream { ///

Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Label { ///

Level of confidence.

#[serde(rename = "Confidence")] @@ -1165,7 +1164,7 @@ pub struct Label { ///

Information about a label detected in a video analysis request and the time the label was detected in the video.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelDetection { ///

Details about the detected label.

#[serde(rename = "Label")] @@ -1179,7 +1178,7 @@ pub struct LabelDetection { ///

Indicates the location of the landmark on the face.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Landmark { ///

Type of landmark.

#[serde(rename = "Type")] @@ -1208,7 +1207,7 @@ pub struct ListCollectionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCollectionsResponse { ///

An array of collection IDs.

#[serde(rename = "CollectionIds")] @@ -1240,7 +1239,7 @@ pub struct ListFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFacesResponse { ///

Version number of the face detection model associated with the input collection (CollectionId).

#[serde(rename = "FaceModelVersion")] @@ -1269,7 +1268,7 @@ pub struct ListStreamProcessorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStreamProcessorsResponse { ///

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of stream processors.

#[serde(rename = "NextToken")] @@ -1281,15 +1280,15 @@ pub struct ListStreamProcessorsResponse { pub stream_processors: Option>, } -///

Provides information about a single type of moderated content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

+///

Provides information about a single type of unsafe content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModerationLabel { ///

Specifies the confidence that Amazon Rekognition has that the label has been correctly identified.

If you don't specify the MinConfidence parameter in the call to DetectModerationLabels, the operation returns labels with a confidence value greater than or equal to 50 percent.

#[serde(rename = "Confidence")] #[serde(skip_serializing_if = "Option::is_none")] pub confidence: Option, - ///

The label name for the type of content detected in the image.

+ ///

The label name for the type of unsafe content detected in the image.

#[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, @@ -1301,7 +1300,7 @@ pub struct ModerationLabel { ///

Indicates whether or not the mouth on the face is open, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MouthOpen { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -1315,7 +1314,7 @@ pub struct MouthOpen { ///

Indicates whether or not the face has a mustache, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Mustache { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -1340,7 +1339,7 @@ pub struct NotificationChannel { ///

A parent label for a label. A label can have 0, 1, or more parents.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Parent { ///

The name of the parent label.

#[serde(rename = "Name")] @@ -1350,7 +1349,7 @@ pub struct Parent { ///

Details about a person detected in a video analysis request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PersonDetail { ///

Bounding box around the detected person.

#[serde(rename = "BoundingBox")] @@ -1368,7 +1367,7 @@ pub struct PersonDetail { ///

Details and path tracking information for a single time a person's path is tracked in a video. Amazon Rekognition operations that track people's paths return an array of PersonDetection objects with elements for each time a person's path is tracked in a video.

For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PersonDetection { ///

Details about a person whose path was tracked in a video.

#[serde(rename = "Person")] @@ -1382,7 +1381,7 @@ pub struct PersonDetection { ///

Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (FaceMatch), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by GetFaceSearch.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PersonMatch { ///

Information about the faces in the input collection that match the face of a person in the video.

#[serde(rename = "FaceMatches")] @@ -1400,7 +1399,7 @@ pub struct PersonMatch { ///

The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by DetectText. Polygon represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Rekognition Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Point { ///

The value of the X coordinate for a point on a Polygon.

#[serde(rename = "X")] @@ -1414,7 +1413,7 @@ pub struct Point { ///

Indicates the pose of the face as determined by its pitch, roll, and yaw.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Pose { ///

Value representing the face rotation on the pitch axis.

#[serde(rename = "Pitch")] @@ -1438,7 +1437,7 @@ pub struct RecognizeCelebritiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecognizeCelebritiesResponse { ///

Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 15 celebrities in an image.

#[serde(rename = "CelebrityFaces")] @@ -1476,7 +1475,7 @@ pub struct SearchFacesByImageRequest { ///

ID of the collection to search.

#[serde(rename = "CollectionId")] pub collection_id: String, - ///

(Optional) Specifies the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.

+ ///

(Optional) Specifies the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.
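A sketch of how the threshold is passed in practice; the collection, bucket, and object names here are hypothetical placeholders:

```rust
use rusoto_core::Region;
use rusoto_rekognition::{
    Image, Rekognition, RekognitionClient, S3Object, SearchFacesByImageRequest,
};

fn main() {
    let client = RekognitionClient::new(Region::UsEast1);
    let request = SearchFacesByImageRequest {
        collection_id: "my-collection".to_string(), // hypothetical collection
        // Ask for at least 90% similarity; leaving this as None falls back
        // to the 80% default described above.
        face_match_threshold: Some(90.0),
        image: Image {
            s3_object: Some(S3Object {
                bucket: Some("my-bucket".to_string()), // hypothetical bucket
                name: Some("face.jpg".to_string()),    // hypothetical object key
                ..Default::default()
            }),
            ..Default::default()
        },
        ..Default::default()
    };
    match client.search_faces_by_image(request).sync() {
        Ok(response) => {
            let matches = response.face_matches.unwrap_or_default();
            println!("{} face(s) matched", matches.len());
        }
        Err(error) => eprintln!("search failed: {:?}", error),
    }
}
```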

#[serde(rename = "FaceMatchThreshold")] #[serde(skip_serializing_if = "Option::is_none")] pub face_match_threshold: Option, @@ -1490,7 +1489,7 @@ pub struct SearchFacesByImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchFacesByImageResponse { ///

An array of faces that match the input face, along with the confidence in the match.

#[serde(rename = "FaceMatches")] @@ -1518,7 +1517,7 @@ pub struct SearchFacesRequest { ///

ID of a face to find matches for in the collection.

#[serde(rename = "FaceId")] pub face_id: String, - ///

Optional value specifying the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.

+ ///

Optional value specifying the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.

#[serde(rename = "FaceMatchThreshold")] #[serde(skip_serializing_if = "Option::is_none")] pub face_match_threshold: Option, @@ -1529,7 +1528,7 @@ pub struct SearchFacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchFacesResponse { ///

An array of faces that matched the input face, along with the confidence in the match.

#[serde(rename = "FaceMatches")] @@ -1547,7 +1546,7 @@ pub struct SearchFacesResponse { ///

Indicates whether or not the face is smiling, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Smile { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -1565,7 +1564,7 @@ pub struct StartCelebrityRecognitionRequest { #[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1579,7 +1578,7 @@ pub struct StartCelebrityRecognitionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartCelebrityRecognitionResponse { ///

The identifier for the celebrity recognition analysis job. Use JobId to identify the job in a subsequent call to GetCelebrityRecognition.

#[serde(rename = "JobId")] @@ -1593,7 +1592,7 @@ pub struct StartContentModerationRequest { #[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1601,19 +1600,19 @@ pub struct StartContentModerationRequest { #[serde(rename = "MinConfidence")] #[serde(skip_serializing_if = "Option::is_none")] pub min_confidence: Option, - ///

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the content moderation analysis to.

+ ///

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the unsafe content analysis to.

#[serde(rename = "NotificationChannel")] #[serde(skip_serializing_if = "Option::is_none")] pub notification_channel: Option, - ///

The video in which you want to moderate content. The video must be stored in an Amazon S3 bucket.

+ ///

The video in which you want to detect unsafe content. The video must be stored in an Amazon S3 bucket.
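A sketch of kicking off an analysis job against a stored video; the bucket and file names are hypothetical placeholders:

```rust
use rusoto_core::Region;
use rusoto_rekognition::{
    Rekognition, RekognitionClient, S3Object, StartContentModerationRequest, Video,
};

fn main() {
    let client = RekognitionClient::new(Region::UsEast1);
    let response = client
        .start_content_moderation(StartContentModerationRequest {
            video: Video {
                s3_object: Some(S3Object {
                    bucket: Some("my-bucket".to_string()), // hypothetical bucket
                    name: Some("clip.mp4".to_string()),    // hypothetical object key
                    ..Default::default()
                }),
            },
            // Only surface labels detected with at least 60% confidence.
            min_confidence: Some(60.0),
            ..Default::default()
        })
        .sync()
        .expect("StartContentModeration failed");
    // Poll GetContentModeration with this JobId to collect the results.
    println!("started job: {:?}", response.job_id);
}
```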

#[serde(rename = "Video")] pub video: Video, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartContentModerationResponse { - ///

The identifier for the content moderation analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

+ ///

The identifier for the unsafe content analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

#[serde(rename = "JobId")] #[serde(skip_serializing_if = "Option::is_none")] pub job_id: Option, @@ -1629,7 +1628,7 @@ pub struct StartFaceDetectionRequest { #[serde(rename = "FaceAttributes")] #[serde(skip_serializing_if = "Option::is_none")] pub face_attributes: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1643,7 +1642,7 @@ pub struct StartFaceDetectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartFaceDetectionResponse { ///

The identifier for the face detection job. Use JobId to identify the job in a subsequent call to GetFaceDetection.

#[serde(rename = "JobId")] @@ -1660,11 +1659,11 @@ pub struct StartFaceSearchRequest { ///

ID of the collection that contains the faces you want to search for.

#[serde(rename = "CollectionId")] pub collection_id: String, - ///

The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%.

+ ///

The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.

#[serde(rename = "FaceMatchThreshold")] #[serde(skip_serializing_if = "Option::is_none")] pub face_match_threshold: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1678,7 +1677,7 @@ pub struct StartFaceSearchRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartFaceSearchResponse { ///

The identifier for the search job. Use JobId to identify the job in a subsequent call to GetFaceSearch.

#[serde(rename = "JobId")] @@ -1692,7 +1691,7 @@ pub struct StartLabelDetectionRequest { #[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1710,7 +1709,7 @@ pub struct StartLabelDetectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartLabelDetectionResponse { ///

The identifier for the label detection job. Use JobId to identify the job in a subsequent call to GetLabelDetection.

#[serde(rename = "JobId")] @@ -1724,7 +1723,7 @@ pub struct StartPersonTrackingRequest { #[serde(rename = "ClientRequestToken")] #[serde(skip_serializing_if = "Option::is_none")] pub client_request_token: Option, - ///

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

+ ///

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

#[serde(rename = "JobTag")] #[serde(skip_serializing_if = "Option::is_none")] pub job_tag: Option, @@ -1738,7 +1737,7 @@ pub struct StartPersonTrackingRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartPersonTrackingResponse { ///

The identifier for the person detection job. Use JobId to identify the job in a subsequent call to GetPersonTracking.

#[serde(rename = "JobId")] @@ -1754,7 +1753,7 @@ pub struct StartStreamProcessorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartStreamProcessorResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1765,12 +1764,12 @@ pub struct StopStreamProcessorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopStreamProcessorResponse {} ///

An object that recognizes faces in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis results.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StreamProcessor { ///

Name of the Amazon Rekognition stream processor.

#[serde(rename = "Name")] @@ -1811,7 +1810,7 @@ pub struct StreamProcessorSettings { ///

Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Sunglasses { ///

Level of confidence in the determination.

#[serde(rename = "Confidence")] @@ -1825,7 +1824,7 @@ pub struct Sunglasses { ///

Information about a word or line of text detected by DetectText.

The DetectedText field contains the text that Amazon Rekognition detected in the image.

Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.

For more information, see Detecting Text in the Amazon Rekognition Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TextDetection { ///

The confidence that Amazon Rekognition has in the accuracy of the detected text and the accuracy of the geometry points around the detected text.

#[serde(rename = "Confidence")] @@ -1855,7 +1854,7 @@ pub struct TextDetection { ///

A face that IndexFaces detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnindexedFace { ///

The structure that contains attributes of a face that IndexFaces detected, but didn't index.

#[serde(rename = "FaceDetail")] @@ -1878,7 +1877,7 @@ pub struct Video { ///

Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition video operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct VideoMetadata { ///

Type of compression used in the analyzed video.

#[serde(rename = "Codec")] @@ -4620,7 +4619,7 @@ pub trait Rekognition { input: DetectLabelsRequest, ) -> RusotoFuture; - ///

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

+ ///

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
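A sketch of the image-bytes variant; the local `photo.jpg` path is a hypothetical placeholder, `.into()` converts the buffer into the crate's blob representation, and rusoto base64-encodes the payload on the wire:

```rust
use rusoto_core::Region;
use rusoto_rekognition::{DetectModerationLabelsRequest, Image, Rekognition, RekognitionClient};

fn main() {
    let client = RekognitionClient::new(Region::UsEast1);
    let bytes = std::fs::read("photo.jpg").expect("read image"); // hypothetical file
    let request = DetectModerationLabelsRequest {
        image: Image {
            bytes: Some(bytes.into()),
            ..Default::default()
        },
        // Only report labels detected with at least 75% confidence.
        min_confidence: Some(75.0),
        ..Default::default()
    };
    let response = client
        .detect_moderation_labels(request)
        .sync()
        .expect("DetectModerationLabels failed");
    for label in response.moderation_labels.unwrap_or_default() {
        println!("{:?} ({:?}%)", label.name, label.confidence);
    }
}
```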

fn detect_moderation_labels( &self, input: DetectModerationLabelsRequest, @@ -4644,7 +4643,7 @@ pub trait Rekognition { input: GetCelebrityRecognitionRequest, ) -> RusotoFuture; - ///

Gets the content moderation analysis results for a Amazon Rekognition Video analysis started by StartContentModeration.

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

+ ///

Gets the unsafe content analysis results for an Amazon Rekognition Video analysis started by StartContentModeration.

Unsafe content analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.

GetContentModeration returns detected unsafe content labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.
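A sketch of the pagination loop this paragraph describes; the job ID is a hypothetical placeholder returned by an earlier StartContentModeration call:

```rust
use rusoto_core::Region;
use rusoto_rekognition::{GetContentModerationRequest, Rekognition, RekognitionClient};

fn main() {
    let client = RekognitionClient::new(Region::UsEast1);
    let job_id = "my-job-id".to_string(); // hypothetical job identifier
    let mut next_token: Option<String> = None;
    loop {
        let response = client
            .get_content_moderation(GetContentModerationRequest {
                job_id: job_id.clone(),
                max_results: Some(1000),
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()
            .expect("GetContentModeration failed");
        for detection in response.moderation_labels.unwrap_or_default() {
            println!("{:?} at {:?} ms", detection.moderation_label, detection.timestamp);
        }
        next_token = response.next_token;
        if next_token.is_none() {
            break; // no more pages
        }
    }
}
```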

fn get_content_moderation( &self, input: GetContentModerationRequest, @@ -4722,7 +4721,7 @@ pub trait Rekognition { input: StartCelebrityRecognitionRequest, ) -> RusotoFuture; - ///

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

+ ///

Starts asynchronous detection of unsafe content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When unsafe content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

     fn start_content_moderation(
         &self,
         input: StartContentModerationRequest,
     ) -> RusotoFuture<StartContentModerationResponse, StartContentModerationError>;
@@ -4776,10 +4775,7 @@ impl RekognitionClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> RekognitionClient {
-        RekognitionClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with(
@@ -4793,10 +4789,14 @@ impl RekognitionClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        RekognitionClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> RekognitionClient {
+        RekognitionClient { client, region }
     }
 }
 
@@ -5086,7 +5086,7 @@ impl Rekognition for RekognitionClient {
         })
     }
 
-    ///
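The refactor above funnels both existing constructors through the new `new_with_client`, which also becomes part of the public API. A minimal sketch of using it to hand a shared `rusoto_core::Client` to the service client:

```rust
use rusoto_core::{Client, Region};
use rusoto_rekognition::RekognitionClient;

fn main() {
    // One shared HTTP dispatcher and credentials stack, reused by this client;
    // equivalent to RekognitionClient::new(Region::UsEast1).
    let shared = Client::shared();
    let _rekognition = RekognitionClient::new_with_client(shared, Region::UsEast1);
}
```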

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

+ ///

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
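For example, moderating an image already stored in S3 might look like this sketch (bucket and key are placeholders, and the 80% confidence floor is an arbitrary choice):

```rust
use rusoto_core::Region;
use rusoto_rekognition::{
    DetectModerationLabelsRequest, Image, Rekognition, RekognitionClient, S3Object,
};

fn moderate_image() {
    let client = RekognitionClient::new(Region::UsEast1);
    let request = DetectModerationLabelsRequest {
        image: Image {
            // Reference a JPEG or PNG in S3 instead of sending base64-encoded bytes.
            s3_object: Some(S3Object {
                bucket: Some("my-image-bucket".to_owned()),
                name: Some("uploads/photo.jpg".to_owned()),
                ..Default::default()
            }),
            ..Default::default()
        },
        min_confidence: Some(80.0), // only return labels detected with >= 80% confidence
        ..Default::default()
    };
    let response = client
        .detect_moderation_labels(request)
        .sync()
        .expect("DetectModerationLabels failed");
    for label in response.moderation_labels.unwrap_or_default() {
        println!("{:?} ({:?}% confidence)", label.name, label.confidence);
    }
}
```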

fn detect_moderation_labels( &self, input: DetectModerationLabelsRequest, @@ -5198,7 +5198,7 @@ impl Rekognition for RekognitionClient { }) } - ///

Gets the content moderation analysis results for an Amazon Rekognition Video analysis started by StartContentModeration.

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

+ ///

Gets the unsafe content analysis results for an Amazon Rekognition Video analysis started by StartContentModeration.

Unsafe content analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.

GetContentModeration returns detected unsafe content labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

fn get_content_moderation( &self, input: GetContentModerationRequest, @@ -5572,7 +5572,7 @@ impl Rekognition for RekognitionClient { }) } - ///

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

+ ///

Starts asynchronous detection of unsafe content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When unsafe content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

fn start_content_moderation( &self, input: StartContentModerationRequest, diff --git a/rusoto/services/resource-groups/Cargo.toml b/rusoto/services/resource-groups/Cargo.toml index a2b6ebf3d61..18379cd3b15 100644 --- a/rusoto/services/resource-groups/Cargo.toml +++ b/rusoto/services/resource-groups/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_resource_groups" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/resource-groups/README.md b/rusoto/services/resource-groups/README.md index 18050b9df08..8d9c97e02a0 100644 --- a/rusoto/services/resource-groups/README.md +++ b/rusoto/services/resource-groups/README.md @@ -23,9 +23,16 @@ To use `rusoto_resource_groups` in your application, add it as a dependency in y ```toml [dependencies] -rusoto_resource_groups = "0.40.0" +rusoto_resource_groups = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/resource-groups/src/custom/mod.rs b/rusoto/services/resource-groups/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/resource-groups/src/custom/mod.rs +++ b/rusoto/services/resource-groups/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/resource-groups/src/generated.rs b/rusoto/services/resource-groups/src/generated.rs index 4f7e467dc0c..84e47755e45 100644 --- a/rusoto/services/resource-groups/src/generated.rs +++ b/rusoto/services/resource-groups/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -44,7 +43,7 @@ pub struct CreateGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGroupOutput { ///

A full description of the resource group after it is created.

#[serde(rename = "Group")] @@ -68,7 +67,7 @@ pub struct DeleteGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGroupOutput { ///

A full description of the deleted resource group.

#[serde(rename = "Group")] @@ -84,7 +83,7 @@ pub struct GetGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGroupOutput { ///

A full description of the resource group.

#[serde(rename = "Group")] @@ -100,7 +99,7 @@ pub struct GetGroupQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGroupQueryOutput { ///

The resource query associated with the specified group.

#[serde(rename = "GroupQuery")] @@ -116,7 +115,7 @@ pub struct GetTagsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTagsOutput { ///

The ARN of the tagged resource group.

#[serde(rename = "Arn")] @@ -130,7 +129,7 @@ pub struct GetTagsOutput { ///

A resource group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Group { ///

The description of the resource group.

#[serde(rename = "Description")] @@ -157,7 +156,7 @@ pub struct GroupFilter { ///

The ARN and group name of a group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupIdentifier { ///

The ARN of a resource group.

#[serde(rename = "GroupArn")] @@ -171,7 +170,7 @@ pub struct GroupIdentifier { ///

The underlying resource query of a resource group. Resources that match query results are part of the group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupQuery { ///

The name of a resource group that is associated with a specific resource query.

#[serde(rename = "GroupName")] @@ -201,7 +200,7 @@ pub struct ListGroupResourcesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGroupResourcesOutput { ///

The NextToken value to include in a subsequent ListGroupResources request, to get more results.

#[serde(rename = "NextToken")] @@ -234,7 +233,7 @@ pub struct ListGroupsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGroupsOutput { ///

A list of GroupIdentifier objects. Each identifier is an object that contains both the GroupName and the GroupArn.

#[serde(rename = "GroupIdentifiers")] @@ -248,7 +247,7 @@ pub struct ListGroupsOutput { ///

A two-part error structure that can occur in ListGroupResources or SearchResources operations on CloudFormation stack-based queries. The error occurs if the CloudFormation stack on which the query is based either does not exist, or has a status that renders the stack inactive. A QueryError occurrence does not necessarily mean that AWS Resource Groups could not complete the operation, but the resulting group might have no member resources.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct QueryError { ///

Possible values are CLOUDFORMATION_STACK_INACTIVE and CLOUDFORMATION_STACK_NOT_EXISTING.

#[serde(rename = "ErrorCode")] @@ -273,7 +272,7 @@ pub struct ResourceFilter { ///

The ARN of a resource, and its resource type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceIdentifier { ///

The ARN of a resource.

#[serde(rename = "ResourceArn")] @@ -312,7 +311,7 @@ pub struct SearchResourcesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchResourcesOutput { ///

The NextToken value to include in a subsequent SearchResources request, to get more results.

#[serde(rename = "NextToken")] @@ -339,7 +338,7 @@ pub struct TagInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagOutput { ///

The ARN of the tagged resource.

#[serde(rename = "Arn")] @@ -362,7 +361,7 @@ pub struct UntagInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagOutput { ///

The ARN of the resource from which tags have been removed.

#[serde(rename = "Arn")] @@ -386,7 +385,7 @@ pub struct UpdateGroupInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupOutput { ///

The full description of the resource group after it has been updated.

#[serde(rename = "Group")] @@ -405,7 +404,7 @@ pub struct UpdateGroupQueryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupQueryOutput { ///

The resource query associated with the resource group after the update.

#[serde(rename = "GroupQuery")] @@ -1235,10 +1234,7 @@ impl ResourceGroupsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ResourceGroupsClient { - ResourceGroupsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1252,10 +1248,14 @@ impl ResourceGroupsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ResourceGroupsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ResourceGroupsClient { + ResourceGroupsClient { client, region } } } diff --git a/rusoto/services/resourcegroupstaggingapi/Cargo.toml b/rusoto/services/resourcegroupstaggingapi/Cargo.toml index f40c8eff327..b18eabe4614 100644 --- a/rusoto/services/resourcegroupstaggingapi/Cargo.toml +++ b/rusoto/services/resourcegroupstaggingapi/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_resourcegroupstaggingapi" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/resourcegroupstaggingapi/README.md b/rusoto/services/resourcegroupstaggingapi/README.md index 1d31af140bc..aca10e111cd 100644 --- a/rusoto/services/resourcegroupstaggingapi/README.md +++ b/rusoto/services/resourcegroupstaggingapi/README.md @@ -23,9 +23,16 @@ To use `rusoto_resourcegroupstaggingapi` in your application, add it as a depend ```toml [dependencies] -rusoto_resourcegroupstaggingapi = "0.40.0" +rusoto_resourcegroupstaggingapi = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/resourcegroupstaggingapi/src/custom/mod.rs b/rusoto/services/resourcegroupstaggingapi/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/resourcegroupstaggingapi/src/custom/mod.rs +++ b/rusoto/services/resourcegroupstaggingapi/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/resourcegroupstaggingapi/src/generated.rs b/rusoto/services/resourcegroupstaggingapi/src/generated.rs index 252f5b607cc..19d134a04e4 100644 --- a/rusoto/services/resourcegroupstaggingapi/src/generated.rs +++ b/rusoto/services/resourcegroupstaggingapi/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Details of the common errors that all actions return.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailureInfo { ///

The code of the common error. Valid values include InternalServiceException, InvalidParameterException, and any valid error code returned by the AWS service that hosts the resource that you want to tag.

#[serde(rename = "ErrorCode")] @@ -48,26 +47,26 @@ pub struct GetResourcesInput { #[serde(rename = "PaginationToken")] #[serde(skip_serializing_if = "Option::is_none")] pub pagination_token: Option, - ///

The constraints on the resources that you want returned. The format of each resource type is service[:resourceType]. For example, specifying a resource type of ec2 returns all tagged Amazon EC2 resources (which includes tagged EC2 instances). Specifying a resource type of ec2:instance returns only EC2 instances.

The string for each service name and resource type is the same as that embedded in a resource's Amazon Resource Name (ARN). Consult the AWS General Reference for the following:

+ ///

The constraints on the resources that you want returned. The format of each resource type is service[:resourceType]. For example, specifying a resource type of ec2 returns all Amazon EC2 resources (which includes EC2 instances). Specifying a resource type of ec2:instance returns only EC2 instances.

The string for each service name and resource type is the same as that embedded in a resource's Amazon Resource Name (ARN). Consult the AWS General Reference for the following:

You can specify multiple resource types by using an array. The array can include up to 100 items. Note that the length constraint requirement applies to each resource type filter.

#[serde(rename = "ResourceTypeFilters")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_type_filters: Option>, - ///

A limit that restricts the number of resources returned by GetResources in paginated output. You can set ResourcesPerPage to a minimum of 1 item and the maximum of 50 items.

+ ///

A limit that restricts the number of resources returned by GetResources in paginated output. You can set ResourcesPerPage to a minimum of 1 item and the maximum of 100 items.

#[serde(rename = "ResourcesPerPage")] #[serde(skip_serializing_if = "Option::is_none")] pub resources_per_page: Option, - ///

A list of tags (keys and values). A request can include up to 50 keys, and each key can include up to 20 values.

If you specify multiple filters connected by an AND operator in a single request, the response returns only those resources that are associated with every specified filter.

If you specify multiple filters connected by an OR operator in a single request, the response returns all resources that are associated with at least one or possibly more of the specified filters.

+ ///

A list of TagFilters (keys and values). Each TagFilter specified must contain a key with values as optional. A request can include up to 50 keys, and each key can include up to 20 values.

Note the following when deciding how to use TagFilters:

  • If you do specify a TagFilter, the response returns only those resources that are currently associated with the specified tag.

  • If you don't specify a TagFilter, the response includes all resources that were ever associated with tags. Resources that currently don't have associated tags are shown with an empty tag set, like this: "Tags": [].

  • If you specify more than one filter in a single request, the response returns only those resources that satisfy all specified filters.

  • If you specify a filter that contains more than one value for a key, the response returns resources that match any of the specified values for that key.

  • If you don't specify any values for a key, the response returns resources that are tagged with that key irrespective of the value.

    For example, for filters: filter1 = {key1, {value1}}, filter2 = {key2, {value2,value3,value4}}, filter3 = {key3}:

    • GetResources( {filter1} ) returns resources tagged with key1=value1

    • GetResources( {filter2} ) returns resources tagged with key2=value2 or key2=value3 or key2=value4

    • GetResources( {filter3} ) returns resources tagged with any tag containing key3 as its tag key, irrespective of its value

    • GetResources( {filter1,filter2,filter3} ) returns resources tagged with ( key1=value1) and ( key2=value2 or key2=value3 or key2=value4) and (key3, irrespective of the value)
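The filter semantics above translate into code roughly as follows; this sketch assumes the `TagFilter { key, values }` shape from this diff and uses placeholder tag names:

```rust
use rusoto_core::Region;
use rusoto_resourcegroupstaggingapi::{
    GetResourcesInput, ResourceGroupsTaggingApi, ResourceGroupsTaggingApiClient, TagFilter,
};

fn find_tagged_instances() {
    let client = ResourceGroupsTaggingApiClient::new(Region::UsEast1);
    let input = GetResourcesInput {
        // Restrict results to EC2 instances, per the ResourceTypeFilters doc above.
        resource_type_filters: Some(vec!["ec2:instance".to_owned()]),
        // Matches Stack=Production AND (Team=web OR Team=api).
        tag_filters: Some(vec![
            TagFilter {
                key: Some("Stack".to_owned()),
                values: Some(vec!["Production".to_owned()]),
            },
            TagFilter {
                key: Some("Team".to_owned()),
                values: Some(vec!["web".to_owned(), "api".to_owned()]),
            },
        ]),
        ..Default::default()
    };
    let output = client.get_resources(input).sync().expect("GetResources failed");
    for mapping in output.resource_tag_mapping_list.unwrap_or_default() {
        println!("{:?}", mapping.resource_arn);
    }
}
```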

#[serde(rename = "TagFilters")] #[serde(skip_serializing_if = "Option::is_none")] pub tag_filters: Option>, - ///

A limit that restricts the number of tags (key and value pairs) returned by GetResources in paginated output. A resource with no tags is counted as having one tag (one key and value pair).

GetResources does not split a resource and its associated tags across pages. If the specified TagsPerPage would cause such a break, a PaginationToken is returned in place of the affected resource and its tags. Use that token in another request to get the remaining data. For example, if you specify a TagsPerPage of 100 and the account has 22 resources with 10 tags each (meaning that each resource has 10 key and value pairs), the output will consist of 3 pages, with the first page displaying the first 10 resources, each with its 10 tags, the second page displaying the next 10 resources each with its 10 tags, and the third page displaying the remaining 2 resources, each with its 10 tags.

You can set TagsPerPage to a minimum of 100 items and the maximum of 500 items.

+ ///

A limit that restricts the number of tags (key and value pairs) returned by GetResources in paginated output. A resource with no tags is counted as having one tag (one key and value pair).

GetResources does not split a resource and its associated tags across pages. If the specified TagsPerPage would cause such a break, a PaginationToken is returned in place of the affected resource and its tags. Use that token in another request to get the remaining data. For example, if you specify a TagsPerPage of 100 and the account has 22 resources with 10 tags each (meaning that each resource has 10 key and value pairs), the output will consist of 3 pages, with the first page displaying the first 10 resources, each with its 10 tags, the second page displaying the next 10 resources each with its 10 tags, and the third page displaying the remaining 2 resources, each with its 10 tags.

You can set TagsPerPage to a minimum of 100 items and the maximum of 500 items.

#[serde(rename = "TagsPerPage")] #[serde(skip_serializing_if = "Option::is_none")] pub tags_per_page: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourcesOutput { ///

A string that indicates that the response contains more data than can be returned in a single response. To receive additional data, specify this string for the PaginationToken value in a subsequent request.

#[serde(rename = "PaginationToken")] @@ -88,7 +87,7 @@ pub struct GetTagKeysInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTagKeysOutput { ///

A string that indicates that the response contains more data than can be returned in a single response. To receive additional data, specify this string for the PaginationToken value in a subsequent request.

#[serde(rename = "PaginationToken")] @@ -112,7 +111,7 @@ pub struct GetTagValuesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTagValuesOutput { ///

A string that indicates that the response contains more data than can be returned in a single response. To receive additional data, specify this string for the PaginationToken value in a subsequent request.

#[serde(rename = "PaginationToken")] @@ -126,9 +125,9 @@ pub struct GetTagValuesOutput { ///

A list of resource ARNs and the tags (keys and values) that are associated with each.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceTagMapping { - ///

An array of resource ARN(s).

+ ///

The ARN of the resource.

#[serde(rename = "ResourceARN")] #[serde(skip_serializing_if = "Option::is_none")] pub resource_arn: Option, @@ -140,7 +139,7 @@ pub struct ResourceTagMapping { ///

The metadata that you apply to AWS resources to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. For more information, see Tag Basics in the Amazon EC2 User Guide for Linux Instances.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Tag { ///

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

#[serde(rename = "Key")] @@ -174,7 +173,7 @@ pub struct TagResourcesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourcesOutput { ///

Details of resources that could not be tagged. An error code, status code, and error message are returned for each failed item.

#[serde(rename = "FailedResourcesMap")] @@ -193,7 +192,7 @@ pub struct UntagResourcesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourcesOutput { ///

Details of resources that could not be untagged. An error code, status code, and error message are returned for each failed item.

#[serde(rename = "FailedResourcesMap")] @@ -446,7 +445,7 @@ impl Error for UntagResourcesError { } /// Trait representing the capabilities of the AWS Resource Groups Tagging API API. AWS Resource Groups Tagging API clients implement this trait. pub trait ResourceGroupsTaggingApi { - ///

Returns all the tagged resources that are associated with the specified tags (keys and values) located in the specified region for the AWS account. The tags and the resource types that you specify in the request are known as filters. The response includes all tags that are associated with the requested resources. If no filter is provided, this action returns a paginated resource list with the associated tags.

+ ///

Returns all the tagged or previously tagged resources that are located in the specified region for the AWS account. You can optionally specify filters (tags and resource types) in your request, depending on what information you want returned. The response includes all tags that are associated with the requested resources.

You can check the PaginationToken response parameter to determine if a query completed. Queries can occasionally return fewer results on a page than allowed. The PaginationToken response parameter value is null only when there are no more results to display.

fn get_resources( &self, input: GetResourcesInput, @@ -464,13 +463,13 @@ pub trait ResourceGroupsTaggingApi { input: GetTagValuesInput, ) -> RusotoFuture; - ///

Applies one or more tags to the specified resources. Note the following:

  • Not all resources can have tags. For a list of resources that support tagging, see Supported Resources in the AWS Resource Groups and Tag Editor User Guide.

  • Each resource can have up to 50 tags. For other limits, see Tag Restrictions in the Amazon EC2 User Guide for Linux Instances.

  • You can only tag resources that are located in the specified region for the AWS account.

  • To add tags to a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for adding tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups and Tag Editor User Guide.

+ ///

Applies one or more tags to the specified resources. Note the following:

  • Not all resources can have tags. For a list of resources that support tagging, see Supported Resources in the AWS Resource Groups User Guide.

  • Each resource can have up to 50 tags. For other limits, see Tag Restrictions in the Amazon EC2 User Guide for Linux Instances.

  • You can only tag resources that are located in the specified region for the AWS account.

  • To add tags to a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for adding tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups User Guide.
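A minimal sketch of the call (the ARN is a placeholder; note that per-resource failures are reported in FailedResourcesMap rather than as a hard error):

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_resourcegroupstaggingapi::{
    ResourceGroupsTaggingApi, ResourceGroupsTaggingApiClient, TagResourcesInput,
};

fn tag_resource() {
    let client = ResourceGroupsTaggingApiClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("Stack".to_owned(), "Production".to_owned());
    let input = TagResourcesInput {
        resource_arn_list: vec!["arn:aws:s3:::my-example-bucket".to_owned()], // placeholder
        tags,
    };
    let output = client.tag_resources(input).sync().expect("TagResources failed");
    // Resources that could not be tagged come back with an error code and message.
    for (arn, failure) in output.failed_resources_map.unwrap_or_default() {
        eprintln!("failed to tag {}: {:?}", arn, failure.error_message);
    }
}
```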

fn tag_resources( &self, input: TagResourcesInput, ) -> RusotoFuture; - ///

Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags from a resource that were already removed. Note the following:

  • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups and Tag Editor User Guide.

  • You can only tag resources that are located in the specified region for the AWS account.

+ ///

Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags from a resource that were already removed. Note the following:

  • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups User Guide.

  • You can only tag resources that are located in the specified region for the AWS account.
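And the inverse operation, again with a placeholder ARN and field names assumed from the shapes in this diff:

```rust
use rusoto_core::Region;
use rusoto_resourcegroupstaggingapi::{
    ResourceGroupsTaggingApi, ResourceGroupsTaggingApiClient, UntagResourcesInput,
};

fn untag_resource() {
    let client = ResourceGroupsTaggingApiClient::new(Region::UsEast1);
    let input = UntagResourcesInput {
        resource_arn_list: vec!["arn:aws:s3:::my-example-bucket".to_owned()], // placeholder
        // Removing a key also removes its value; already-removed keys still succeed.
        tag_keys: vec!["Stack".to_owned()],
    };
    client.untag_resources(input).sync().expect("UntagResources failed");
}
```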

fn untag_resources( &self, input: UntagResourcesInput, @@ -488,10 +487,7 @@ impl ResourceGroupsTaggingApiClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ResourceGroupsTaggingApiClient { - ResourceGroupsTaggingApiClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -505,15 +501,22 @@ impl ResourceGroupsTaggingApiClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ResourceGroupsTaggingApiClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client( + client: Client, + region: region::Region, + ) -> ResourceGroupsTaggingApiClient { + ResourceGroupsTaggingApiClient { client, region } } } impl ResourceGroupsTaggingApi for ResourceGroupsTaggingApiClient { - ///

Returns all the tagged resources that are associated with the specified tags (keys and values) located in the specified region for the AWS account. The tags and the resource types that you specify in the request are known as filters. The response includes all tags that are associated with the requested resources. If no filter is provided, this action returns a paginated resource list with the associated tags.

+ ///

Returns all the tagged or previously tagged resources that are located in the specified region for the AWS account. You can optionally specify filters (tags and resource types) in your request, depending on what information you want returned. The response includes all tags that are associated with the requested resources.

You can check the PaginationToken response parameter to determine if a query completed. Queries can occasionally return fewer results on a page than allowed. The PaginationToken response parameter value is null only when there are no more results to display.

fn get_resources( &self, input: GetResourcesInput, @@ -609,7 +612,7 @@ impl ResourceGroupsTaggingApi for ResourceGroupsTaggingApiClient { }) } - ///

Applies one or more tags to the specified resources. Note the following:

  • Not all resources can have tags. For a list of resources that support tagging, see Supported Resources in the AWS Resource Groups and Tag Editor User Guide.

  • Each resource can have up to 50 tags. For other limits, see Tag Restrictions in the Amazon EC2 User Guide for Linux Instances.

  • You can only tag resources that are located in the specified region for the AWS account.

  • To add tags to a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for adding tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups and Tag Editor User Guide.

+ ///

Applies one or more tags to the specified resources. Note the following:

  • Not all resources can have tags. For a list of resources that support tagging, see Supported Resources in the AWS Resource Groups User Guide.

  • Each resource can have up to 50 tags. For other limits, see Tag Restrictions in the Amazon EC2 User Guide for Linux Instances.

  • You can only tag resources that are located in the specified region for the AWS account.

  • To add tags to a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for adding tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups User Guide.

fn tag_resources( &self, input: TagResourcesInput, @@ -641,7 +644,7 @@ impl ResourceGroupsTaggingApi for ResourceGroupsTaggingApiClient { }) } - ///

Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags from a resource that were already removed. Note the following:

  • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups and Tag Editor User Guide.

  • You can only tag resources that are located in the specified region for the AWS account.

+ ///

Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags from a resource that were already removed. Note the following:

  • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups User Guide.

  • You can only tag resources that are located in the specified region for the AWS account.

fn untag_resources( &self, input: UntagResourcesInput, diff --git a/rusoto/services/resourcegroupstaggingapi/src/lib.rs b/rusoto/services/resourcegroupstaggingapi/src/lib.rs index c36d92ff6c3..f02e2631921 100644 --- a/rusoto/services/resourcegroupstaggingapi/src/lib.rs +++ b/rusoto/services/resourcegroupstaggingapi/src/lib.rs @@ -12,7 +12,7 @@ // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] -//!

Resource Groups Tagging API

This guide describes the API operations for resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both instances a tag key of "Stack." The value of "Stack" might be "Testing" for one and "Production" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation. For more information about tagging, see Working with Tag Editor and Working with Resource Groups. For more information about permissions you need to use the resource groups tagging APIs, see Obtaining Permissions for Resource Groups and Obtaining Permissions for Tagging .

You can use the resource groups tagging APIs to complete the following tasks:

  • Tag and untag supported resources located in the specified region for the AWS account

  • Use tag-based filters to search for resources located in the specified region for the AWS account

  • List all existing tag keys in the specified region for the AWS account

  • List all existing values for the specified key in the specified region for the AWS account

Not all resources can have tags. For a list of resources that you can tag, see Supported Resources in the AWS Resource Groups and Tag Editor User Guide.

To make full use of the resource groups tagging APIs, you might need additional IAM permissions, including permission to access the resources of individual services as well as permission to view and apply tags to those resources. For more information, see Obtaining Permissions for Tagging in the AWS Resource Groups and Tag Editor User Guide.

+//!

Resource Groups Tagging API

This guide describes the API operations for resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both instances a tag key of "Stack." The value of "Stack" might be "Testing" for one and "Production" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

  • Tag and untag supported resources located in the specified region for the AWS account

  • Use tag-based filters to search for resources located in the specified region for the AWS account

  • List all existing tag keys in the specified region for the AWS account

  • List all existing values for the specified key in the specified region for the AWS account

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

  • tag:GetResources

  • tag:TagResources

  • tag:UntagResources

  • tag:GetTagKeys

  • tag:GetTagValues

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

  • Alexa for Business (a4b)

  • API Gateway

  • AWS AppStream

  • AWS AppSync

  • AWS App Mesh

  • Amazon Athena

  • Amazon Aurora

  • AWS Backup

  • AWS Certificate Manager

  • AWS Certificate Manager Private CA

  • Amazon Cloud Directory

  • AWS CloudFormation

  • Amazon CloudFront

  • AWS CloudHSM

  • AWS CloudTrail

  • Amazon CloudWatch (alarms only)

  • Amazon CloudWatch Events

  • Amazon CloudWatch Logs

  • AWS CodeBuild

  • AWS CodeCommit

  • AWS CodePipeline

  • AWS CodeStar

  • Amazon Cognito Identity

  • Amazon Cognito User Pools

  • Amazon Comprehend

  • AWS Config

  • AWS Data Pipeline

  • AWS Database Migration Service

  • AWS Datasync

  • AWS Direct Connect

  • AWS Directory Service

  • Amazon DynamoDB

  • Amazon EBS

  • Amazon EC2

  • Amazon ECR

  • Amazon ECS

  • AWS Elastic Beanstalk

  • Amazon Elastic File System

  • Elastic Load Balancing

  • Amazon ElastiCache

  • Amazon Elasticsearch Service

  • AWS Elemental MediaLive

  • AWS Elemental MediaPackage

  • AWS Elemental MediaTailor

  • Amazon EMR

  • Amazon FSx

  • Amazon Glacier

  • AWS Glue

  • Amazon Inspector

  • AWS IoT Analytics

  • AWS IoT Core

  • AWS IoT Device Defender

  • AWS IoT Device Management

  • AWS IoT Greengrass

  • AWS Key Management Service

  • Amazon Kinesis

  • Amazon Kinesis Data Analytics

  • Amazon Kinesis Data Firehose

  • AWS Lambda

  • AWS License Manager

  • Amazon Machine Learning

  • Amazon MQ

  • Amazon MSK

  • Amazon Neptune

  • AWS OpsWorks

  • Amazon RDS

  • Amazon Redshift

  • AWS Resource Access Manager

  • AWS Resource Groups

  • AWS RoboMaker

  • Amazon Route 53

  • Amazon Route 53 Resolver

  • Amazon S3 (buckets only)

  • Amazon SageMaker

  • AWS Secrets Manager

  • AWS Service Catalog

  • Amazon Simple Notification Service (SNS)

  • Amazon Simple Queue Service (SQS)

  • AWS Systems Manager (SSM)

  • AWS Step Functions

  • AWS Storage Gateway

  • AWS Transfer for SFTP

  • Amazon VPC

  • Amazon WorkSpaces
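Tying the listing tasks above together, here is a sketch that enumerates every tag key in a region and the values recorded for each (first page only, for brevity; the input/output field names are assumptions based on the shapes in this diff):

```rust
use rusoto_core::Region;
use rusoto_resourcegroupstaggingapi::{
    GetTagKeysInput, GetTagValuesInput, ResourceGroupsTaggingApi, ResourceGroupsTaggingApiClient,
};

fn list_keys_and_values() {
    let client = ResourceGroupsTaggingApiClient::new(Region::UsEast1);
    // All tag keys currently in use in this region for the account.
    let keys = client
        .get_tag_keys(GetTagKeysInput::default())
        .sync()
        .expect("GetTagKeys failed")
        .tag_keys
        .unwrap_or_default();
    for key in keys {
        // All values ever recorded for this key.
        let values = client
            .get_tag_values(GetTagValuesInput {
                key: key.clone(),
                ..Default::default()
            })
            .sync()
            .expect("GetTagValues failed")
            .tag_values
            .unwrap_or_default();
        println!("{} => {:?}", key, values);
    }
}
```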

//! //! If you're using the service, you're probably looking for [ResourceGroupsTaggingApiClient](struct.ResourceGroupsTaggingApiClient.html) and [ResourceGroupsTaggingApi](trait.ResourceGroupsTaggingApi.html). diff --git a/rusoto/services/route53/Cargo.toml b/rusoto/services/route53/Cargo.toml index 98f47f7f0e2..b3058f20677 100644 --- a/rusoto/services/route53/Cargo.toml +++ b/rusoto/services/route53/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_route53" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,17 +17,19 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -xml-rs = "0.7" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/route53/README.md b/rusoto/services/route53/README.md index 1f26ffa7e69..19f712fd382 100644 --- a/rusoto/services/route53/README.md +++ b/rusoto/services/route53/README.md @@ -23,9 +23,16 @@ To use `rusoto_route53` in your application, add it as a dependency in your `Car ```toml [dependencies] -rusoto_route53 = "0.40.0" +rusoto_route53 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/route53/src/custom/custom_tests.rs b/rusoto/services/route53/src/custom/custom_tests.rs index c00169c5b7c..5dbbf9870be 100644 --- a/rusoto/services/route53/src/custom/custom_tests.rs +++ b/rusoto/services/route53/src/custom/custom_tests.rs @@ -1,8 +1,10 @@ extern crate rusoto_mock; -use rusoto_core::{Region, RusotoError}; -use crate::generated::{ListResourceRecordSetsError, ListResourceRecordSetsRequest, Route53, Route53Client}; use crate::custom::util::quote_txt_record; +use crate::generated::{ + ListResourceRecordSetsError, ListResourceRecordSetsRequest, Route53, Route53Client, +}; +use rusoto_core::{Region, RusotoError}; use self::rusoto_mock::*; @@ -30,11 +32,9 @@ fn test_parse_no_such_hosted_zone_error() { assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!( - RusotoError::Service( - ListResourceRecordSetsError::NoSuchHostedZone( - "No hosted zone found with ID: NO-SUCH-ZONE".to_owned() - ) - ), + RusotoError::Service(ListResourceRecordSetsError::NoSuchHostedZone( + "No hosted zone found with ID: NO-SUCH-ZONE".to_owned() + )), err ); } diff --git a/rusoto/services/route53/src/generated.rs b/rusoto/services/route53/src/generated.rs index e7d9e0ca954..95689fba8ea 100644 --- a/rusoto/services/route53/src/generated.rs +++ b/rusoto/services/route53/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -4380,7 +4379,7 @@ pub struct ListResourceRecordSetsRequest { pub hosted_zone_id: String, ///

(Optional) The maximum number of resource record sets to include in the response body for this request. If the response includes more than maxitems resource record sets, the value of the IsTruncated element in the response is true, and the values of the NextRecordName and NextRecordType elements in the response identify the first resource record set in the next group of maxitems resource record sets.

pub max_items: Option, - ///

Weighted resource record sets only: If results were truncated for a given DNS name and type, specify the value of NextRecordIdentifier from the previous response to get the next resource record set that has the current DNS name and type.

+ ///

Resource record sets that have a routing policy other than simple: If results were truncated for a given DNS name and type, specify the value of NextRecordIdentifier from the previous response to get the next resource record set that has the current DNS name and type.

pub start_record_identifier: Option, ///

The first name in the lexicographic ordering of resource record sets that you want to list.

pub start_record_name: Option, @@ -11856,10 +11855,7 @@ impl Route53Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Route53Client { - Route53Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -11873,10 +11869,14 @@ impl Route53Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Route53Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Route53Client { + Route53Client { client, region } } } @@ -11920,7 +11920,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11976,7 +11976,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12032,7 +12032,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12085,7 +12085,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12139,7 +12139,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12190,7 +12190,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12241,7 +12241,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12294,7 +12294,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12345,7 +12345,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - 
ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12398,7 +12398,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12459,7 +12459,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12508,7 +12508,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12554,7 +12554,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12597,7 +12597,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12640,7 +12640,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12689,7 +12689,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12732,7 +12732,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12792,7 +12792,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12848,7 +12848,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12894,7 +12894,7 @@ impl Route53 for Route53Client { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12940,7 +12940,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12982,7 +12982,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13038,7 +13038,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13087,7 +13087,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13130,7 +13130,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13179,7 +13179,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13227,7 +13227,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13273,7 +13273,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13317,7 +13317,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13361,7 +13361,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13404,7 +13404,7 @@ 
impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13447,7 +13447,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13489,7 +13489,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13539,7 +13539,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13582,7 +13582,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13624,7 +13624,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13683,7 +13683,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13736,7 +13736,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13792,7 +13792,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13845,7 +13845,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13898,7 +13898,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let 
_start_document = stack.next(); @@ -13959,7 +13959,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14009,7 +14009,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14058,7 +14058,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14115,7 +14115,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14167,7 +14167,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14223,7 +14223,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14282,7 +14282,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14346,7 +14346,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14398,7 +14398,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14456,7 +14456,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14515,7 +14515,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14573,7 +14573,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14625,7 +14625,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14681,7 +14681,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14733,7 +14733,7 @@ impl Route53 for Route53Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14771,5 +14771,4 @@ mod protocol_tests { let result = client.get_hosted_zone(request).sync(); assert!(!result.is_ok(), "parse error: {:?}", result); } - } diff --git a/rusoto/services/route53domains/Cargo.toml b/rusoto/services/route53domains/Cargo.toml index e354d191f83..de6b62a1498 100644 --- a/rusoto/services/route53domains/Cargo.toml +++ b/rusoto/services/route53domains/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_route53domains" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/route53domains/README.md b/rusoto/services/route53domains/README.md index f6f5d8b2964..38c1f32eea3 100644 --- a/rusoto/services/route53domains/README.md +++ b/rusoto/services/route53domains/README.md @@ -23,9 +23,16 @@ To use `rusoto_route53domains` in your application, add it as a dependency in yo ```toml [dependencies] -rusoto_route53domains = "0.40.0" +rusoto_route53domains = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
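The `serialize_structs` feature in the list above is what the long run of `#[cfg_attr(...)]` hunks in the generated code below implements: output structs derive `Serialize` not only under `cfg(test)` but whenever the feature is enabled. A minimal sketch of what that unlocks, assuming a consumer crate that enables the feature (`rusoto_route53domains = { version = "0.41.0", features = ["serialize_structs"] }`) and pulls in `serde_json` itself; the default-constructed response is only for illustration:

```rust
// Sketch only: compiles when the consumer enables `serialize_structs`;
// without the feature, `Serialize` is derived only under cfg(test).
use rusoto_route53domains::CheckDomainAvailabilityResponse;

fn main() {
    // A default-constructed response stands in for a real API result here.
    let response = CheckDomainAvailabilityResponse::default();
    // With the feature on, output structs can be serialized, e.g. for
    // caching, logging, or snapshotting in downstream applications.
    let json = serde_json::to_string(&response).expect("response should serialize");
    println!("{}", json);
}
```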
diff --git a/rusoto/services/route53domains/src/custom/mod.rs b/rusoto/services/route53domains/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/route53domains/src/custom/mod.rs
+++ b/rusoto/services/route53domains/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/route53domains/src/generated.rs b/rusoto/services/route53domains/src/generated.rs
index c45b0762e69..33ccc972008 100644
--- a/rusoto/services/route53domains/src/generated.rs
+++ b/rusoto/services/route53domains/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 /// <p>Information for one billing record.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct BillingRecord {
     /// <p>The date that the operation was billed, in Unix format.</p>
     #[serde(rename = "BillDate")]
@@ -64,7 +63,7 @@ pub struct CheckDomainAvailabilityRequest {
 /// <p>The CheckDomainAvailability response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CheckDomainAvailabilityResponse {
     /// <p>Whether the domain name is available for registering.</p> <p>You can register only domains designated as AVAILABLE.</p> <p>Valid values:</p> <p>AVAILABLE</p> <p>The domain name is available.</p> <p>AVAILABLERESERVED</p> <p>The domain name is reserved under specific conditions.</p> <p>AVAILABLEPREORDER</p> <p>The domain name is available and can be preordered.</p> <p>DONTKNOW</p> <p>The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.</p> <p>PENDING</p> <p>The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.</p> <p>RESERVED</p> <p>The domain name has been reserved for another person or organization.</p> <p>UNAVAILABLE</p> <p>The domain name is not available.</p> <p>UNAVAILABLEPREMIUM</p> <p>The domain name is not available.</p> <p>UNAVAILABLE_RESTRICTED</p> <p>The domain name is forbidden.</p>
     #[serde(rename = "Availability")]
@@ -85,7 +84,7 @@ pub struct CheckDomainTransferabilityRequest {
 /// <p>The CheckDomainTransferability response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CheckDomainTransferabilityResponse {
     /// <p>A complex type that contains information about whether the specified domain can be transferred to Amazon Route 53.</p>
     #[serde(rename = "Transferability")]
@@ -165,7 +164,7 @@ pub struct DeleteTagsForDomainRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DeleteTagsForDomainResponse {}
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
@@ -176,7 +175,7 @@ pub struct DisableDomainAutoRenewRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DisableDomainAutoRenewResponse {}
 
 /// <p>The DisableDomainTransferLock request includes the following element.</p>
@@ -189,7 +188,7 @@ pub struct DisableDomainTransferLockRequest {
 /// <p>The DisableDomainTransferLock response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DisableDomainTransferLockResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -198,7 +197,7 @@ pub struct DisableDomainTransferLockResponse {
 /// <p>Information about one suggested domain name.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainSuggestion {
     /// <p>Whether the domain name is available for registering.</p> <p>You can register only the domains that are designated as AVAILABLE.</p> <p>Valid values:</p> <p>AVAILABLE</p> <p>The domain name is available.</p> <p>AVAILABLERESERVED</p> <p>The domain name is reserved under specific conditions.</p> <p>AVAILABLEPREORDER</p> <p>The domain name is available and can be preordered.</p> <p>DONTKNOW</p> <p>The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.</p> <p>PENDING</p> <p>The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.</p> <p>RESERVED</p> <p>The domain name has been reserved for another person or organization.</p> <p>UNAVAILABLE</p> <p>The domain name is not available.</p> <p>UNAVAILABLEPREMIUM</p> <p>The domain name is not available.</p> <p>UNAVAILABLE_RESTRICTED</p> <p>The domain name is forbidden.</p>
     #[serde(rename = "Availability")]
@@ -212,7 +211,7 @@ pub struct DomainSuggestion {
 /// <p>Summary information about one domain.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainSummary {
     /// <p>Indicates whether the domain is automatically renewed upon expiration.</p>
     #[serde(rename = "AutoRenew")]
@@ -233,7 +232,7 @@ pub struct DomainSummary {
 /// <p>A complex type that contains information about whether the specified domain can be transferred to Amazon Route 53.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DomainTransferability {
     #[serde(rename = "Transferable")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -248,7 +247,7 @@ pub struct EnableDomainAutoRenewRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct EnableDomainAutoRenewResponse {}
 
 /// <p>A request to set the transfer lock for the specified domain.</p>
@@ -261,7 +260,7 @@ pub struct EnableDomainTransferLockRequest {
 /// <p>The EnableDomainTransferLock response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct EnableDomainTransferLockResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -288,7 +287,7 @@ pub struct GetContactReachabilityStatusRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetContactReachabilityStatusResponse {
     /// <p>The domain name for which you requested the reachability status.</p>
     #[serde(rename = "domainName")]
@@ -310,7 +309,7 @@ pub struct GetDomainDetailRequest {
 /// <p>The GetDomainDetail response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetDomainDetailResponse {
     /// <p>Email address to contact to report incorrect contact information for a domain, to report that the domain is being used to send spam, to report that someone is cybersquatting on a domain name, or report some other type of abuse.</p>
     #[serde(rename = "AbuseContactEmail")]
@@ -407,7 +406,7 @@ pub struct GetDomainSuggestionsRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetDomainSuggestionsResponse {
     /// <p>A list of possible domain names. If you specified true for OnlyAvailable in the request, the list contains only domains that are available for registration.</p>
     #[serde(rename = "SuggestionsList")]
@@ -425,7 +424,7 @@ pub struct GetOperationDetailRequest {
 /// <p>The GetOperationDetail response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetOperationDetailResponse {
     /// <p>The name of a domain.</p>
     #[serde(rename = "DomainName")]
@@ -468,7 +467,7 @@ pub struct ListDomainsRequest {
 /// <p>The ListDomains response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListDomainsResponse {
     /// <p>A summary of domains.</p>
     #[serde(rename = "Domains")]
@@ -498,7 +497,7 @@ pub struct ListOperationsRequest {
 /// <p>The ListOperations response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListOperationsResponse {
     /// <p>If there are more operations than you specified for MaxItems in the request, submit another request and include the value of NextPageMarker in the value of Marker.</p>
     #[serde(rename = "NextPageMarker")]
@@ -519,7 +518,7 @@ pub struct ListTagsForDomainRequest {
 /// <p>The ListTagsForDomain response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForDomainResponse {
     /// <p>A list of the tags that are associated with the specified domain.</p>
     #[serde(rename = "TagList")]
@@ -540,7 +539,7 @@ pub struct Nameserver {
 /// <p>OperationSummary includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct OperationSummary {
     /// <p>Identifier returned to track the requested action.</p>
     #[serde(rename = "OperationId")]
@@ -598,7 +597,7 @@ pub struct RegisterDomainRequest {
 /// <p>The RegisterDomain response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RegisterDomainResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -621,7 +620,7 @@ pub struct RenewDomainRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RenewDomainResponse {
     /// <p>The identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -637,7 +636,7 @@ pub struct ResendContactReachabilityEmailRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ResendContactReachabilityEmailResponse {
     /// <p>The domain name for which you requested a confirmation email.</p>
     #[serde(rename = "domainName")]
@@ -663,7 +662,7 @@ pub struct RetrieveDomainAuthCodeRequest {
 /// <p>The RetrieveDomainAuthCode response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RetrieveDomainAuthCodeResponse {
     /// <p>The authorization code for the domain.</p>
     #[serde(rename = "AuthCode")]
@@ -733,7 +732,7 @@ pub struct TransferDomainRequest {
 /// <p>The TransferDomain response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TransferDomainResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -762,7 +761,7 @@ pub struct UpdateDomainContactPrivacyRequest {
 /// <p>The UpdateDomainContactPrivacy response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateDomainContactPrivacyResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -791,7 +790,7 @@ pub struct UpdateDomainContactRequest {
 /// <p>The UpdateDomainContact response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateDomainContactResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -811,7 +810,7 @@ pub struct UpdateDomainNameserversRequest {
 /// <p>The UpdateDomainNameservers response includes the following element.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateDomainNameserversResponse {
     /// <p>Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.</p>
     #[serde(rename = "OperationId")]
@@ -831,7 +830,7 @@ pub struct UpdateTagsForDomainRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateTagsForDomainResponse {}
 
 /// <p>The ViewBilling request includes the following elements.</p>
@@ -857,7 +856,7 @@ pub struct ViewBillingRequest {
 /// <p>The ViewBilling response includes the following elements.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ViewBillingResponse {
     /// <p>A summary of billing records.</p>
#[serde(rename = "BillingRecords")] @@ -2223,10 +2222,7 @@ impl Route53DomainsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> Route53DomainsClient { - Route53DomainsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2240,10 +2236,14 @@ impl Route53DomainsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - Route53DomainsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> Route53DomainsClient { + Route53DomainsClient { client, region } } } diff --git a/rusoto/services/rustls-unit-test.sh b/rusoto/services/rustls-unit-test.sh new file mode 100755 index 00000000000..a63b9c1ccd5 --- /dev/null +++ b/rusoto/services/rustls-unit-test.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for D in `find . -maxdepth 1 -mindepth 1 -type d`; +do + (cd $D ; cargo +$1 test --no-default-features --features=rustls ) +done \ No newline at end of file diff --git a/rusoto/services/s3/Cargo.toml b/rusoto/services/s3/Cargo.toml index 6bc78ae57fb..be72fce6353 100644 --- a/rusoto/services/s3/Cargo.toml +++ b/rusoto/services/s3/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_s3" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,17 +17,19 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -xml-rs = "0.7" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/s3/README.md b/rusoto/services/s3/README.md index f98e31c6615..f5c3ba8a05d 100644 --- a/rusoto/services/s3/README.md +++ b/rusoto/services/s3/README.md @@ -23,9 +23,16 @@ To use `rusoto_s3` in your application, add it as a dependency in your `Cargo.to ```toml [dependencies] -rusoto_s3 = "0.40.0" +rusoto_s3 = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
diff --git a/rusoto/services/s3/src/custom/custom_tests.rs b/rusoto/services/s3/src/custom/custom_tests.rs index c5c218da4e4..ab9e5082579 100644 --- a/rusoto/services/s3/src/custom/custom_tests.rs +++ b/rusoto/services/s3/src/custom/custom_tests.rs @@ -2,11 +2,11 @@ extern crate rusoto_mock; use crate::generated::*; +use self::rusoto_mock::*; use bytes::Bytes; use futures::{Future, Stream}; -use rusoto_core::{Region, RusotoError}; use rusoto_core::signature::SignedRequest; -use self::rusoto_mock::*; +use rusoto_core::{Region, RusotoError}; #[test] fn test_multipart_upload_copy_response() { @@ -30,8 +30,14 @@ fn test_multipart_upload_copy_response() { .upload_part_copy(upload_part_copy_req) .sync() .unwrap(); - assert!(result.copy_part_result.is_some(), "Should have result in etag field"); - assert_eq!(result.copy_part_result.unwrap().e_tag.unwrap(), "\"9a9d1bbe80188883302bff764b4cb321\""); + assert!( + result.copy_part_result.is_some(), + "Should have result in etag field" + ); + assert_eq!( + result.copy_part_result.unwrap().e_tag.unwrap(), + "\"9a9d1bbe80188883302bff764b4cb321\"" + ); } #[test] @@ -64,39 +70,51 @@ fn test_list_object_versions_with_multiple_versions() { #[test] fn initiate_multipart_upload_happy_path() { - let body = MockResponseReader::read_response("test_resources/custom", "s3_initiate_multipart_upload.xml"); + let body = MockResponseReader::read_response( + "test_resources/custom", + "s3_initiate_multipart_upload.xml", + ); let mock = MockRequestDispatcher::with_status(200).with_body(&body); let client = S3Client::new_with(mock, MockCredentialsProvider, Region::UsEast1); - let result = client.create_multipart_upload(CreateMultipartUploadRequest { - bucket: "example-bucket".to_owned(), - key: "example-object".to_owned(), - ..Default::default() - }).sync(); + let result = client + .create_multipart_upload(CreateMultipartUploadRequest { + bucket: "example-bucket".to_owned(), + key: "example-object".to_owned(), + ..Default::default() + }) + .sync(); match result { Err(_) => panic!("Couldn't parse initiate_multipart_upload"), Ok(result) => { assert_eq!(sstr("example-bucket"), result.bucket); assert_eq!(sstr("example-object"), result.key); - assert_eq!(sstr("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"), - result.upload_id); + assert_eq!( + sstr("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"), + result.upload_id + ); } } } #[test] fn complete_multipart_upload_happy_path() { - let body = MockResponseReader::read_response("test_resources/custom", "s3_complete_multipart_upload.xml"); + let body = MockResponseReader::read_response( + "test_resources/custom", + "s3_complete_multipart_upload.xml", + ); let mock = MockRequestDispatcher::with_status(200).with_body(&body); let client = S3Client::new_with(mock, MockCredentialsProvider, Region::UsEast1); - let result = client.complete_multipart_upload(CompleteMultipartUploadRequest { - bucket: "example-bucket".to_owned(), - key: "example-object".to_owned(), - upload_id: "VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA".to_owned(), - ..Default::default() - }).sync(); + let result = client + .complete_multipart_upload(CompleteMultipartUploadRequest { + bucket: "example-bucket".to_owned(), + key: "example-object".to_owned(), + upload_id: "VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA".to_owned(), + ..Default::default() + }) + .sync(); match result { Err(_) => panic!("Couldn't parse s3_complete_multipart_upload"), @@ -110,14 +128,17 @@ fn complete_multipart_upload_happy_path() { #[test] 
fn list_multipart_upload_happy_path() { - let body = MockResponseReader::read_response("test_resources/custom", "s3_list_multipart_uploads.xml"); + let body = + MockResponseReader::read_response("test_resources/custom", "s3_list_multipart_uploads.xml"); let mock = MockRequestDispatcher::with_status(200).with_body(&body); let client = S3Client::new_with(mock, MockCredentialsProvider, Region::UsEast1); - let result = client.list_multipart_uploads(ListMultipartUploadsRequest { - bucket: "example-bucket".to_owned(), - ..Default::default() - }).sync(); + let result = client + .list_multipart_uploads(ListMultipartUploadsRequest { + bucket: "example-bucket".to_owned(), + ..Default::default() + }) + .sync(); match result { Err(_) => panic!("Couldn't parse s3_list_multipart_uploads.xml"), @@ -136,8 +157,10 @@ fn list_multipart_upload_happy_path() { }; assert_eq!(an_upload.initiator.as_ref().unwrap().id, test_initiator.id); - assert_eq!(an_upload.initiator.as_ref().unwrap().display_name, - test_initiator.display_name); + assert_eq!( + an_upload.initiator.as_ref().unwrap().display_name, + test_initiator.display_name + ); assert_eq!(an_upload.initiated, sstr("2015-09-01T19:22:56.000Z")); @@ -147,8 +170,10 @@ fn list_multipart_upload_happy_path() { }; assert_eq!(an_upload.owner.as_ref().unwrap().id, test_owner.id); - assert_eq!(an_upload.owner.as_ref().unwrap().display_name, - test_owner.display_name); + assert_eq!( + an_upload.owner.as_ref().unwrap().display_name, + test_owner.display_name + ); assert_eq!(an_upload.storage_class, sstr("STANDARD")); } @@ -213,8 +238,10 @@ fn list_multipart_upload_parts_happy_path() { }; assert_eq!(result.initiator.as_ref().unwrap().id, test_initiator.id); - assert_eq!(result.initiator.as_ref().unwrap().display_name, - test_initiator.display_name); + assert_eq!( + result.initiator.as_ref().unwrap().display_name, + test_initiator.display_name + ); let test_owner = Owner { id: sstr("b84c6b0c308085829b6562b586f6664fc00faab6cfd441e90ad418ea916eed83"), @@ -222,8 +249,10 @@ fn list_multipart_upload_parts_happy_path() { }; assert_eq!(result.owner.as_ref().unwrap().id, test_owner.id); - assert_eq!(result.owner.as_ref().unwrap().display_name, - test_owner.display_name); + assert_eq!( + result.owner.as_ref().unwrap().display_name, + test_owner.display_name + ); assert_eq!(result.storage_class, sstr("STANDARD")); @@ -240,7 +269,8 @@ fn list_multipart_upload_parts_happy_path() { #[test] fn list_multipart_uploads_no_uploads() { - let mock = MockRequestDispatcher::with_status(200).with_body(r#" + let mock = MockRequestDispatcher::with_status(200).with_body( + r#" rusoto1440826568 @@ -251,7 +281,8 @@ fn list_multipart_uploads_no_uploads() { 1000 false - "#); + "#, + ); let mut req = ListMultipartUploadsRequest::default(); req.bucket = "test-bucket".to_owned(); @@ -263,13 +294,13 @@ fn list_multipart_uploads_no_uploads() { assert!(result.uploads.is_none()); } - #[cfg(nightly)] #[bench] fn bench_parse_list_buckets_response(b: &mut Bencher) { use test::Bencher; let mock = MockRequestDispatcher::with_status(200) - .with_body(r#" + .with_body( + r#" @@ -287,7 +318,8 @@ fn bench_parse_list_buckets_response(b: &mut Bencher) { - "#) + "#, + ) .with_request_checker(|request: &SignedRequest| { assert_eq!(request.method, "GET"); assert_eq!(request.path, "/"); @@ -304,7 +336,8 @@ fn bench_parse_list_buckets_response(b: &mut Bencher) { // tests the model generation and deserialization end-to-end fn should_parse_sample_list_buckets_response() { let mock = MockRequestDispatcher::with_status(200) - 
.with_body(r#" + .with_body( + r#" @@ -322,7 +355,8 @@ fn should_parse_sample_list_buckets_response() { - "#) + "#, + ) .with_request_checker(|request: &SignedRequest| { assert_eq!(request.method, "GET"); assert_eq!(request.path, "/"); @@ -341,8 +375,10 @@ fn should_parse_sample_list_buckets_response() { let bucket1 = buckets.get(0).unwrap(); assert_eq!(bucket1.name, Some("quotes".to_string())); - assert_eq!(bucket1.creation_date, - Some("2006-02-03T16:45:09.000Z".to_string())); + assert_eq!( + bucket1.creation_date, + Some("2006-02-03T16:45:09.000Z".to_string()) + ); } #[test] @@ -389,9 +425,15 @@ fn should_serialize_complicated_request() { .with_request_checker(|request: &SignedRequest| { assert_eq!(request.method, "GET"); assert_eq!(request.path, "/bucket/key"); - assert_eq!(*request.params.get("response-content-type").unwrap(), - sstr("response_content_type")); - assert!(request.headers.get("range").unwrap().contains(&Vec::from("range"))); + assert_eq!( + *request.params.get("response-content-type").unwrap(), + sstr("response_content_type") + ); + assert!(request + .headers + .get("range") + .unwrap() + .contains(&Vec::from("range"))); assert!(request.payload.is_none()); }); @@ -401,13 +443,18 @@ fn should_serialize_complicated_request() { #[test] fn should_parse_location_constraint() { - let body = MockResponseReader::read_response("test_resources/generated/valid", "s3-get-bucket-location.xml"); + let body = MockResponseReader::read_response( + "test_resources/generated/valid", + "s3-get-bucket-location.xml", + ); let mock = MockRequestDispatcher::with_status(200).with_body(&body); let client = S3Client::new_with(mock, MockCredentialsProvider, Region::UsEast1); - let result = client.get_bucket_location(GetBucketLocationRequest { - bucket: "example-bucket".to_owned() - }).sync(); + let result = client + .get_bucket_location(GetBucketLocationRequest { + bucket: "example-bucket".to_owned(), + }) + .sync(); match result { Err(_) => panic!("Couldn't parse get_bucket_location"), @@ -444,13 +491,14 @@ fn sstr(value: &'static str) -> Option { #[test] fn test_parse_no_such_bucket_error() { - let mock = MockRequestDispatcher::with_status(404) - .with_body(r#" + let mock = MockRequestDispatcher::with_status(404).with_body( + r#" NoSuchBucket The specified bucket does not exist 4442587FB7D0A2F9 - "#); + "#, + ); let request = ListObjectsV2Request { bucket: "no-such-bucket".to_owned(), @@ -461,5 +509,10 @@ fn test_parse_no_such_bucket_error() { let result = client.list_objects_v2(request).sync(); assert!(result.is_err()); let err = result.err().unwrap(); - assert_eq!(RusotoError::Service(ListObjectsV2Error::NoSuchBucket("The specified bucket does not exist".to_owned())), err); + assert_eq!( + RusotoError::Service(ListObjectsV2Error::NoSuchBucket( + "The specified bucket does not exist".to_owned() + )), + err + ); } diff --git a/rusoto/services/s3/src/custom/mod.rs b/rusoto/services/s3/src/custom/mod.rs index 1a971f4e227..5fd3c7fddfd 100644 --- a/rusoto/services/s3/src/custom/mod.rs +++ b/rusoto/services/s3/src/custom/mod.rs @@ -2,4 +2,4 @@ pub mod util; #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/s3/src/custom/util.rs b/rusoto/services/s3/src/custom/util.rs index 5e10ec77994..822d4eb1dae 100644 --- a/rusoto/services/s3/src/custom/util.rs +++ b/rusoto/services/s3/src/custom/util.rs @@ -1,9 +1,11 @@ -use rusoto_core::signature; -use rusoto_core::signature::SignedRequest; +use crate::generated::{ + DeleteObjectRequest, GetObjectRequest, 
PutObjectRequest, UploadPartRequest, +}; +use rusoto_core::credential::AwsCredentials; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::region::Region; -use rusoto_core::credential::AwsCredentials; -use crate::generated::{GetObjectRequest, PutObjectRequest, DeleteObjectRequest, UploadPartRequest}; +use rusoto_core::signature; +use rusoto_core::signature::SignedRequest; use std::time::Duration; /// URL encodes an S3 object key. This is necessary for `copy_object` and `upload_part_copy`, /// which require the `copy_source` field to be URL encoded. @@ -41,7 +43,6 @@ macro_rules! add_headers { }); } - macro_rules! add_params { ( $input:ident , $params:ident ; $p:ident , $e:expr ; $( $t:tt )* @@ -60,26 +61,35 @@ macro_rules! add_params { } pub struct PreSignedRequestOption { - pub expires_in: Duration + pub expires_in: Duration, } impl Default for PreSignedRequestOption { fn default() -> Self { Self { - expires_in: Duration::from_secs(3600) + expires_in: Duration::from_secs(3600), } } } - pub trait PreSignedRequest { /// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html - fn get_presigned_url(&self, region: &Region, credentials: &AwsCredentials, option: &PreSignedRequestOption) -> String; + fn get_presigned_url( + &self, + region: &Region, + credentials: &AwsCredentials, + option: &PreSignedRequestOption, + ) -> String; } impl PreSignedRequest for GetObjectRequest { /// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html - fn get_presigned_url(&self, region: &Region, credentials: &AwsCredentials, option: &PreSignedRequestOption) -> String { + fn get_presigned_url( + &self, + region: &Region, + credentials: &AwsCredentials, + option: &PreSignedRequestOption, + ) -> String { let request_uri = format!("/{bucket}/{key}", bucket = self.bucket, key = self.key); let mut request = SignedRequest::new("GET", "s3", ®ion, &request_uri); let mut params = Params::new(); @@ -115,7 +125,12 @@ impl PreSignedRequest for GetObjectRequest { impl PreSignedRequest for PutObjectRequest { /// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - fn get_presigned_url(&self, region: &Region, credentials: &AwsCredentials, option: &PreSignedRequestOption) -> String { + fn get_presigned_url( + &self, + region: &Region, + credentials: &AwsCredentials, + option: &PreSignedRequestOption, + ) -> String { let request_uri = format!("/{bucket}/{key}", bucket = self.bucket, key = self.key); let mut request = SignedRequest::new("PUT", "s3", ®ion, &request_uri); @@ -162,7 +177,12 @@ impl PreSignedRequest for PutObjectRequest { impl PreSignedRequest for DeleteObjectRequest { /// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html - fn get_presigned_url(&self, region: &Region, credentials: &AwsCredentials, option: &PreSignedRequestOption) -> String { + fn get_presigned_url( + &self, + region: &Region, + credentials: &AwsCredentials, + option: &PreSignedRequestOption, + ) -> String { let request_uri = format!("/{bucket}/{key}", bucket = self.bucket, key = self.key); let mut request = SignedRequest::new("DELETE", "s3", ®ion, &request_uri); let mut params = Params::new(); @@ -184,7 +204,12 @@ impl PreSignedRequest for DeleteObjectRequest { impl PreSignedRequest for UploadPartRequest { /// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html - fn get_presigned_url(&self, region: &Region, credentials: &AwsCredentials, option: &PreSignedRequestOption) -> String { + fn get_presigned_url( + &self, + region: &Region, + credentials: 
&AwsCredentials, + option: &PreSignedRequestOption, + ) -> String { let request_uri = format!("/{bucket}/{key}", bucket = self.bucket, key = self.key); let mut request = SignedRequest::new("PUT", "s3", ®ion, &request_uri); diff --git a/rusoto/services/s3/src/generated.rs b/rusoto/services/s3/src/generated.rs index 758ebb1d651..fa0344ee458 100644 --- a/rusoto/services/s3/src/generated.rs +++ b/rusoto/services/s3/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -2134,6 +2133,8 @@ pub struct CopyObjectOutput { pub sse_customer_algorithm: Option, ///

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

pub sse_customer_key_md5: Option, + ///

If present, specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -2219,6 +2220,8 @@ pub struct CopyObjectRequest { pub sse_customer_key: Option, ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

pub sse_customer_key_md5: Option, + ///

Specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -2389,6 +2392,8 @@ pub struct CreateMultipartUploadOutput { pub sse_customer_algorithm: Option, ///

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

pub sse_customer_key_md5: Option, + ///

If present, specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -2470,6 +2475,8 @@ pub struct CreateMultipartUploadRequest { pub sse_customer_key: Option, ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

pub sse_customer_key_md5: Option, + ///

Specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -10875,6 +10882,8 @@ pub struct PutObjectOutput { pub sse_customer_algorithm: Option, ///

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

pub sse_customer_key_md5: Option, + ///

If present, specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -10948,6 +10957,8 @@ pub struct PutObjectRequest { pub sse_customer_key: Option, ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

pub sse_customer_key_md5: Option, + ///

Specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

+ pub ssekms_encryption_context: Option, ///

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

pub ssekms_key_id: Option, ///

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

@@ -11741,7 +11752,7 @@ pub struct ReplicationRule { pub filter: Option, ///

A unique identifier for the rule. The maximum value is 255 characters.

pub id: Option, - ///

The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

  • Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap

  • Same object qualify tag based filter criteria specified in multiple rules

For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

+ ///

The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

  • Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap

  • Same object qualify tag based filter criteria specified in multiple rules

For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

pub priority: Option, ///

A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).

pub source_selection_criteria: Option, @@ -18393,7 +18404,7 @@ pub trait S3 { input: DeleteBucketPolicyRequest, ) -> RusotoFuture<(), DeleteBucketPolicyError>; - ///

Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

+ ///

Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

fn delete_bucket_replication( &self, input: DeleteBucketReplicationRequest, @@ -18834,10 +18845,7 @@ impl S3Client { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> S3Client { - S3Client { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -18851,10 +18859,14 @@ impl S3Client { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - S3Client { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> S3Client { + S3Client { client, region } } } @@ -18893,7 +18905,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -18955,7 +18967,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19155,6 +19167,13 @@ impl S3 for S3Client { ); } + if let Some(ref ssekms_encryption_context) = input.ssekms_encryption_context { + request.add_header( + "x-amz-server-side-encryption-context", + &ssekms_encryption_context.to_string(), + ); + } + if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", @@ -19206,7 +19225,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19242,6 +19261,12 @@ impl S3 for S3Client { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; + if let Some(ssekms_encryption_context) = + response.headers.get("x-amz-server-side-encryption-context") + { + let value = ssekms_encryption_context.to_owned(); + result.ssekms_encryption_context = Some(value) + }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") @@ -19335,7 +19360,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19456,6 +19481,13 @@ impl S3 for S3Client { ); } + if let Some(ref ssekms_encryption_context) = input.ssekms_encryption_context { + request.add_header( + "x-amz-server-side-encryption-context", + &ssekms_encryption_context.to_string(), + ); + } + if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", @@ -19503,7 +19535,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let 
_start_document = stack.next(); @@ -19539,6 +19571,12 @@ impl S3 for S3Client { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; + if let Some(ssekms_encryption_context) = + response.headers.get("x-amz-server-side-encryption-context") + { + let value = ssekms_encryption_context.to_owned(); + result.ssekms_encryption_context = Some(value) + }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") @@ -19768,7 +19806,7 @@ impl S3 for S3Client { }) } - ///

Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

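The constructor hunks above consolidate `new` and `new_with` onto a single `new_with_client`, so all three paths build the client the same way. A minimal sketch of calling the new constructor, assuming `rusoto_core`'s `Client` and `Region` types are in scope:

```rust
// Sketch: the new `new_with_client` constructor introduced in this diff.
// `Client::shared()` reuses the process-wide HTTP client, so this is
// equivalent to `S3Client::new(Region::UsEast1)` after the refactor.
use rusoto_core::{Client, Region};
use rusoto_s3::S3Client;

fn make_client() -> S3Client {
    S3Client::new_with_client(Client::shared(), Region::UsEast1)
}
```

The same pattern is applied to `SageMakerRuntimeClient` further down, letting a caller inject a preconfigured `Client` (for example, one sharing a connection pool) without going through `new_with`.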
+ ///

Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

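The surrounding hunks also thread the new SSE-KMS encryption context through the S3 operations: an optional `ssekms_encryption_context` input becomes the `x-amz-server-side-encryption-context` request header, and the same header is read back into the output structs. A hedged sketch of setting it on a put, with placeholder bucket, key, and context values:

```rust
// Sketch: supplying an SSE-KMS encryption context on PutObject. The field
// name matches the header wiring added in this diff; bucket, key, and
// context are placeholders (the context is Base64-encoded JSON per the
// S3 API).
use rusoto_s3::PutObjectRequest;

fn encrypted_put_request() -> PutObjectRequest {
    PutObjectRequest {
        bucket: "example-bucket".to_string(),
        key: "example-key".to_string(),
        server_side_encryption: Some("aws:kms".to_string()),
        ssekms_encryption_context: Some("eyJkZXB0IjoiZW5nIn0=".to_string()),
        ..Default::default()
    }
}
```

The repeated `trim_whitespace(true)` to `trim_whitespace(false)` flips in these hunks presumably keep whitespace-only XML text nodes intact, so S3 keys or values consisting entirely of spaces are no longer silently dropped during response parsing.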
#[allow(unused_variables, warnings)] fn delete_bucket_replication( &self, @@ -19895,7 +19933,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -19954,7 +19992,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20023,7 +20061,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20097,7 +20135,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20145,7 +20183,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20192,7 +20230,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20240,7 +20278,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20285,7 +20323,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20334,7 +20372,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20382,7 +20420,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20430,7 +20468,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let 
_start_document = stack.next(); @@ -20478,7 +20516,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20526,7 +20564,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20572,7 +20610,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20617,7 +20655,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20664,7 +20702,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20742,7 +20780,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20789,7 +20827,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20834,7 +20872,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20882,7 +20920,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20929,7 +20967,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -20977,7 +21015,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21268,7 +21306,7 @@ 
impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21323,7 +21361,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21368,7 +21406,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21422,7 +21460,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21473,7 +21511,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21560,7 +21598,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21678,7 +21716,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21861,7 +21899,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21912,7 +21950,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -21963,7 +22001,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22004,7 +22042,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22067,7 +22105,7 @@ impl S3 for S3Client { } else { let reader = 
EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22133,7 +22171,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22198,7 +22236,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22268,7 +22306,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -22320,7 +22358,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23147,6 +23185,13 @@ impl S3 for S3Client { ); } + if let Some(ref ssekms_encryption_context) = input.ssekms_encryption_context { + request.add_header( + "x-amz-server-side-encryption-context", + &ssekms_encryption_context.to_string(), + ); + } + if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", @@ -23198,7 +23243,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23232,6 +23277,12 @@ impl S3 for S3Client { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; + if let Some(ssekms_encryption_context) = + response.headers.get("x-amz-server-side-encryption-context") + { + let value = ssekms_encryption_context.to_owned(); + result.ssekms_encryption_context = Some(value) + }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") @@ -23331,7 +23382,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23401,7 +23452,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23471,7 +23522,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23550,7 +23601,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23610,7 +23661,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23715,7 +23766,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23768,7 +23819,8 @@ impl S3 for S3Client { ); } let mut params = Params::new(); - params.put("select&select-type", "2"); + params.put_key("select"); + params.put("select-type", "2"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); SelectObjectContentRequestSerializer::serialize( @@ -23796,7 +23848,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -23880,7 +23932,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -24042,7 +24094,7 @@ impl S3 for S3Client { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/sagemaker-runtime/Cargo.toml b/rusoto/services/sagemaker-runtime/Cargo.toml index 2d57e494f18..72c0fc9abd8 100644 --- a/rusoto/services/sagemaker-runtime/Cargo.toml +++ b/rusoto/services/sagemaker-runtime/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sagemaker_runtime" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -21,14 +21,16 @@ serde = "1.0.2" serde_derive = "1.0.2" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sagemaker-runtime/README.md b/rusoto/services/sagemaker-runtime/README.md index f35eb44bfaa..e7d27914eb3 100644 --- a/rusoto/services/sagemaker-runtime/README.md +++ b/rusoto/services/sagemaker-runtime/README.md @@ -23,9 +23,16 @@ To use `rusoto_sagemaker_runtime` in your application, add it 
as a dependency in ```toml [dependencies] -rusoto_sagemaker_runtime = "0.40.0" +rusoto_sagemaker_runtime = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/sagemaker-runtime/src/custom/mod.rs b/rusoto/services/sagemaker-runtime/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/sagemaker-runtime/src/custom/mod.rs +++ b/rusoto/services/sagemaker-runtime/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/sagemaker-runtime/src/generated.rs b/rusoto/services/sagemaker-runtime/src/generated.rs index df3300e71a4..04bf8126f33 100644 --- a/rusoto/services/sagemaker-runtime/src/generated.rs +++ b/rusoto/services/sagemaker-runtime/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -133,10 +132,7 @@ impl SageMakerRuntimeClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SageMakerRuntimeClient { - SageMakerRuntimeClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -150,10 +146,14 @@ impl SageMakerRuntimeClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SageMakerRuntimeClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SageMakerRuntimeClient { + SageMakerRuntimeClient { client, region } } } diff --git a/rusoto/services/sagemaker/Cargo.toml b/rusoto/services/sagemaker/Cargo.toml index 21f30d78196..eb6158cae9b 100644 --- a/rusoto/services/sagemaker/Cargo.toml +++ b/rusoto/services/sagemaker/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sagemaker" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sagemaker/README.md b/rusoto/services/sagemaker/README.md index d30d1f9e0ed..a8c7a91c26b 100644 --- a/rusoto/services/sagemaker/README.md +++ b/rusoto/services/sagemaker/README.md @@ -23,9 +23,16 @@ To use `rusoto_sagemaker` in your application, add it 
as a dependency in your `C ```toml [dependencies] -rusoto_sagemaker = "0.40.0" +rusoto_sagemaker = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/sagemaker/src/custom/mod.rs b/rusoto/services/sagemaker/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/sagemaker/src/custom/mod.rs +++ b/rusoto/services/sagemaker/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/sagemaker/src/generated.rs b/rusoto/services/sagemaker/src/generated.rs index b183d80ea89..654c61bff9e 100644 --- a/rusoto/services/sagemaker/src/generated.rs +++ b/rusoto/services/sagemaker/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -35,7 +34,7 @@ pub struct AddTagsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsOutput { ///

A list of tags associated with the Amazon SageMaker resource.

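The `cfg_attr` change above is what the new `serialize_structs` Cargo feature (added in the Cargo.toml and README hunks earlier) toggles: output structs derive `Serialize` outside of tests too. A sketch of what that enables, assuming the crate is built with the feature turned on:

```rust
// Sketch: with `rusoto_sagemaker` built with the new `serialize_structs`
// feature enabled, output structs derive Serialize and can be dumped as
// JSON. This does not compile without the feature.
use rusoto_sagemaker::AddTagsOutput;

fn to_json(output: &AddTagsOutput) -> serde_json::Result<String> {
    serde_json::to_string_pretty(output)
}
```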
#[serde(rename = "Tags")] @@ -65,7 +64,7 @@ pub struct AlgorithmSpecification { ///

Specifies the validation and image scan statuses of the algorithm.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AlgorithmStatusDetails { ///

The status of the scan of the algorithm's Docker image container.

#[serde(rename = "ImageScanStatuses")] @@ -79,7 +78,7 @@ pub struct AlgorithmStatusDetails { ///

Represents the overall status of an algorithm.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AlgorithmStatusItem { ///

if the overall status is Failed, the reason for the failure.

#[serde(rename = "FailureReason")] @@ -95,7 +94,7 @@ pub struct AlgorithmStatusItem { ///

Provides summary information about an algorithm.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AlgorithmSummary { ///

The Amazon Resource Name (ARN) of the algorithm.

#[serde(rename = "AlgorithmArn")] @@ -144,7 +143,7 @@ pub struct AlgorithmValidationSpecification { ///

Configures how labels are consolidated across human workers.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AnnotationConsolidationConfig { - ///

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

  • Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

  • Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

  • Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as "votes" for the correct label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

  • Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

For more information, see Annotation Consolidation.

+ ///

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

  • Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox

  • Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass

  • Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as "votes" for the correct label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation

  • Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass

  • Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to the most-assigned label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition

  • Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to the most-assigned label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

For more information, see Annotation Consolidation.

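A sketch of pointing the consolidation config at one of the built-in Lambdas from the list above (bounding box, us-east-1):

```rust
// Sketch: label consolidation via a built-in Ground Truth Lambda taken
// from the ARN list documented above.
use rusoto_sagemaker::AnnotationConsolidationConfig;

fn bounding_box_consolidation() -> AnnotationConsolidationConfig {
    AnnotationConsolidationConfig {
        annotation_consolidation_lambda_arn:
            "arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox".to_string(),
    }
}
```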
#[serde(rename = "AnnotationConsolidationLambdaArn")] pub annotation_consolidation_lambda_arn: String, } @@ -225,9 +224,21 @@ pub struct ChannelSpecification { pub supported_input_modes: Vec, } +///

Contains information about the output location for managed spot training checkpoint data.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CheckpointConfig { + ///

(Optional) The local directory where checkpoints are written. The default directory is /opt/ml/checkpoints/.

+ #[serde(rename = "LocalPath")] + #[serde(skip_serializing_if = "Option::is_none")] + pub local_path: Option, + ///

Identifies the S3 path where you want Amazon SageMaker to store checkpoints. For example, s3://bucket-name/key-name-prefix.

+ #[serde(rename = "S3Uri")] + pub s3_uri: String, +} + ///

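Before the Git-repository summary below, a sketch of filling in the new `CheckpointConfig` struct (the S3 URI is a placeholder; `LocalPath` falls back to `/opt/ml/checkpoints/` when omitted):

```rust
// Sketch: checkpoint output location for managed spot training, per the
// new CheckpointConfig struct above. The bucket name is a placeholder.
use rusoto_sagemaker::CheckpointConfig;

fn checkpoint_config() -> CheckpointConfig {
    CheckpointConfig {
        s3_uri: "s3://example-bucket/checkpoints/".to_string(),
        local_path: None, // defaults to /opt/ml/checkpoints/
    }
}
```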
Specifies summary information about a Git repository.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CodeRepositorySummary { ///

The Amazon Resource Name (ARN) of the Git repository.

#[serde(rename = "CodeRepositoryArn")] @@ -263,7 +274,7 @@ pub struct CognitoMemberDefinition { ///

A summary of a model compilation job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompilationJobSummary { ///

The time when the model compilation job completed.

#[serde(rename = "CompilationEndTime")] @@ -297,7 +308,7 @@ pub struct CompilationJobSummary { ///

Describes the container, as part of model definition.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ContainerDefinition { - ///

This parameter is ignored for models that contain only a PrimaryContainer.

When a ContainerDefinition is part of an inference pipeline, the value of ths parameter uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned based on the position of the ContainerDefinition in the pipeline. If you specify a value for the ContainerHostName for any ContainerDefinition that is part of an inference pipeline, you must specify a value for the ContainerHostName parameter of every ContainerDefinition in that pipeline.

+ ///

This parameter is ignored for models that contain only a PrimaryContainer.

When a ContainerDefinition is part of an inference pipeline, the value of this parameter uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned based on the position of the ContainerDefinition in the pipeline. If you specify a value for the ContainerHostName for any ContainerDefinition that is part of an inference pipeline, you must specify a value for the ContainerHostName parameter of every ContainerDefinition in that pipeline.

#[serde(rename = "ContainerHostname")] #[serde(skip_serializing_if = "Option::is_none")] pub container_hostname: Option, @@ -309,11 +320,11 @@ pub struct ContainerDefinition { #[serde(rename = "Image")] #[serde(skip_serializing_if = "Option::is_none")] pub image: Option, - ///

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide a S3 path to the model artifacts in ModelDataUrl.

+ ///

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide an S3 path to the model artifacts in ModelDataUrl.

#[serde(rename = "ModelDataUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub model_data_url: Option, - ///

The name of the model package to use to create the model.

+ ///

The name or Amazon Resource Name (ARN) of the model package to use to create the model.

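A sketch of a `ContainerDefinition` using the image and model-artifact fields described above (registry path and S3 URI are placeholders):

```rust
// Sketch: a primary container pointing at model artifacts in S3. The ECR
// image path and bucket are placeholders.
use rusoto_sagemaker::ContainerDefinition;

fn primary_container() -> ContainerDefinition {
    ContainerDefinition {
        image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest".to_string()),
        model_data_url: Some("s3://example-bucket/model.tar.gz".to_string()),
        ..Default::default()
    }
}
```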
#[serde(rename = "ModelPackageName")] #[serde(skip_serializing_if = "Option::is_none")] pub model_package_name: Option, @@ -331,7 +342,7 @@ pub struct ContinuousParameterRange { ///

The name of the continuous hyperparameter to tune.

#[serde(rename = "Name")] pub name: String, - ///

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparemeter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Hyperparemeter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

+ ///

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

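A sketch of a range using the logarithmic scale described above (which requires strictly positive bounds):

```rust
// Sketch: a continuous hyperparameter range tuned on a logarithmic scale,
// matching the scaling rules documented above. Bounds are illustrative.
use rusoto_sagemaker::ContinuousParameterRange;

fn learning_rate_range() -> ContinuousParameterRange {
    ContinuousParameterRange {
        name: "learning_rate".to_string(),
        min_value: "0.0001".to_string(),
        max_value: "0.1".to_string(),
        scaling_type: Some("Logarithmic".to_string()),
    }
}
```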
#[serde(rename = "ScalingType")] #[serde(skip_serializing_if = "Option::is_none")] pub scaling_type: Option, @@ -375,7 +386,7 @@ pub struct CreateAlgorithmInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAlgorithmOutput { ///

The Amazon Resource Name (ARN) of the new algorithm.

#[serde(rename = "AlgorithmArn")] @@ -393,7 +404,7 @@ pub struct CreateCodeRepositoryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCodeRepositoryOutput { ///

The Amazon Resource Name (ARN) of the new repository.

#[serde(rename = "CodeRepositoryArn")] @@ -414,13 +425,13 @@ pub struct CreateCompilationJobRequest { ///

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model compilation, Amazon SageMaker needs your permission to:

  • Read input data from an S3 bucket

  • Write model artifacts to an S3 bucket

  • Write logs to Amazon CloudWatch Logs

  • Publish metrics to Amazon CloudWatch

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.

#[serde(rename = "RoleArn")] pub role_arn: String, - ///

The duration allowed for model compilation.

+ ///

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCompilationJobResponse { ///

If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns the following data in JSON format:

  • CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job.

#[serde(rename = "CompilationJobArn")] @@ -446,7 +457,7 @@ pub struct CreateEndpointConfigInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEndpointConfigOutput { ///

The Amazon Resource Name (ARN) of the endpoint configuration.

#[serde(rename = "EndpointConfigArn")] @@ -468,7 +479,7 @@ pub struct CreateEndpointInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateEndpointOutput { ///

The Amazon Resource Name (ARN) of the endpoint.

#[serde(rename = "EndpointArn")] @@ -483,7 +494,7 @@ pub struct CreateHyperParameterTuningJobRequest { ///

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. The name must have { } to { } characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.

#[serde(rename = "HyperParameterTuningJobName")] pub hyper_parameter_tuning_job_name: String, - ///

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

+ ///

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -498,7 +509,7 @@ pub struct CreateHyperParameterTuningJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHyperParameterTuningJobResponse { ///

The Amazon Resource Name (ARN) of the tuning job. Amazon SageMaker assigns an ARN to a hyperparameter tuning job when you create it.

#[serde(rename = "HyperParameterTuningJobArn")] @@ -537,14 +548,14 @@ pub struct CreateLabelingJobRequest { #[serde(rename = "StoppingConditions")] #[serde(skip_serializing_if = "Option::is_none")] pub stopping_conditions: Option, - ///

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

+ ///

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLabelingJobResponse { ///

The Amazon Resource Name (ARN) of the labeling job. You use this ARN to identify the labeling job.

#[serde(rename = "LabelingJobArn")] @@ -582,7 +593,7 @@ pub struct CreateModelInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateModelOutput { ///

The ARN of the model created in Amazon SageMaker.

#[serde(rename = "ModelArn")] @@ -617,7 +628,7 @@ pub struct CreateModelPackageInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateModelPackageOutput { ///

The Amazon Resource Name (ARN) of the new model package.

#[serde(rename = "ModelPackageArn")] @@ -626,15 +637,15 @@ pub struct CreateModelPackageOutput { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateNotebookInstanceInput { - ///

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

+ ///

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

#[serde(rename = "AcceleratorTypes")] #[serde(skip_serializing_if = "Option::is_none")] pub accelerator_types: Option>, - ///

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "AdditionalCodeRepositories")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_code_repositories: Option>, - ///

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "DefaultCodeRepository")] #[serde(skip_serializing_if = "Option::is_none")] pub default_code_repository: Option, @@ -645,7 +656,7 @@ pub struct CreateNotebookInstanceInput { ///

The type of ML compute instance to launch for the notebook instance.

#[serde(rename = "InstanceType")] pub instance_type: String, - ///

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

+ ///

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, @@ -656,7 +667,7 @@ pub struct CreateNotebookInstanceInput { ///

The name of the new notebook instance.

#[serde(rename = "NotebookInstanceName")] pub notebook_instance_name: String, - ///

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

+ ///

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

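A sketch of a minimal notebook-instance request using the role and default-repository fields described above (all names and ARNs are placeholders):

```rust
// Sketch: minimal CreateNotebookInstanceInput with a default code
// repository; instance name, type, role ARN, and repo are placeholders.
use rusoto_sagemaker::CreateNotebookInstanceInput;

fn notebook_request() -> CreateNotebookInstanceInput {
    CreateNotebookInstanceInput {
        notebook_instance_name: "example-notebook".to_string(),
        instance_type: "ml.t2.medium".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/ExampleSageMakerRole".to_string(),
        default_code_repository: Some("example-repo".to_string()),
        ..Default::default()
    }
}
```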
#[serde(rename = "RoleArn")] pub role_arn: String, ///

Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled.

Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users.

@@ -697,7 +708,7 @@ pub struct CreateNotebookInstanceLifecycleConfigInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNotebookInstanceLifecycleConfigOutput { ///

The Amazon Resource Name (ARN) of the lifecycle configuration.

#[serde(rename = "NotebookInstanceLifecycleConfigArn")] @@ -706,7 +717,7 @@ pub struct CreateNotebookInstanceLifecycleConfigOutput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNotebookInstanceOutput { ///

The Amazon Resource Name (ARN) of the notebook instance.

#[serde(rename = "NotebookInstanceArn")] @@ -726,7 +737,7 @@ pub struct CreatePresignedNotebookInstanceUrlInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePresignedNotebookInstanceUrlOutput { ///

A JSON object that contains the URL string.

#[serde(rename = "AuthorizedUrl")] @@ -739,10 +750,18 @@ pub struct CreateTrainingJobRequest { ///

The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by Amazon SageMaker, see Algorithms. For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker.

#[serde(rename = "AlgorithmSpecification")] pub algorithm_specification: AlgorithmSpecification, + ///

Contains information about the output location for managed spot training checkpoint data.

+ #[serde(rename = "CheckpointConfig")] + #[serde(skip_serializing_if = "Option::is_none")] + pub checkpoint_config: Option, ///

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job.

#[serde(rename = "EnableInterContainerTrafficEncryption")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_inter_container_traffic_encryption: Option, + ///

To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. This option is useful when training jobs can be interrupted and when there is flexibility in when the training job is run.

The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed.

+ #[serde(rename = "EnableManagedSpotTraining")] + #[serde(skip_serializing_if = "Option::is_none")] + pub enable_managed_spot_training: Option, ///

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

#[serde(rename = "EnableNetworkIsolation")] #[serde(skip_serializing_if = "Option::is_none")] @@ -751,11 +770,11 @@ pub struct CreateTrainingJobRequest { #[serde(rename = "HyperParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub hyper_parameters: Option<::std::collections::HashMap>, - ///

An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3 location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams.

+ ///

An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files will be made available as input streams. They do not need to be downloaded.

#[serde(rename = "InputDataConfig")] #[serde(skip_serializing_if = "Option::is_none")] pub input_data_config: Option>, - ///

Specifies the path to the S3 bucket where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.

+ ///

Specifies the path to the S3 location where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.

#[serde(rename = "OutputDataConfig")] pub output_data_config: OutputDataConfig, ///

The resources, including the ML compute instances and ML storage volumes, to use for model training.

ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want Amazon SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1.

@@ -764,7 +783,7 @@ pub struct CreateTrainingJobRequest { ///

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

#[serde(rename = "RoleArn")] pub role_arn: String, - ///

Sets a duration for training. Use this parameter to cap model training costs. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.

When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job. This intermediate data is a valid model artifact. You can use it to create a model using the CreateModel API.

+ ///

Specifies a limit to how long a model training job can run. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, ///

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

@@ -781,7 +800,7 @@ pub struct CreateTrainingJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTrainingJobResponse { ///

The Amazon Resource Name (ARN) of the training job.

#[serde(rename = "TrainingJobArn")] @@ -794,11 +813,15 @@ pub struct CreateTransformJobRequest { #[serde(rename = "BatchStrategy")] #[serde(skip_serializing_if = "Option::is_none")] pub batch_strategy: Option, + ///

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.

+ #[serde(rename = "DataProcessing")] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_processing: Option, ///

The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.

#[serde(rename = "Environment")] #[serde(skip_serializing_if = "Option::is_none")] pub environment: Option<::std::collections::HashMap>, - ///

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the optimal settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

+ ///

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the optimal settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

#[serde(rename = "MaxConcurrentTransforms")] #[serde(skip_serializing_if = "Option::is_none")] pub max_concurrent_transforms: Option, @@ -828,7 +851,7 @@ pub struct CreateTransformJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTransformJobResponse { ///

The Amazon Resource Name (ARN) of the transform job.

#[serde(rename = "TransformJobArn")] @@ -840,14 +863,14 @@ pub struct CreateWorkteamRequest { ///

A description of the work team.

#[serde(rename = "Description")] pub description: String, - ///

A list of MemberDefinition objects that contains objects that identify the Amazon Cognito user pool that makes up the work team. For more information, see Amazon Cognito User Pools.

All of the CognitoMemberDefinition objects that make up the member definition must have the same ClientId and UserPool values.

+ ///

A list of MemberDefinition objects that contains objects that identify the Amazon Cognito user pool that makes up the work team. For more information, see Amazon Cognito User Pools.

All of the CognitoMemberDefinition objects that make up the member definition must have the same ClientId and UserPool values.

#[serde(rename = "MemberDefinitions")] pub member_definitions: Vec, ///

Configures notification of workers regarding available or expiring work items.

#[serde(rename = "NotificationConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub notification_configuration: Option, - ///

+ ///

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -857,7 +880,7 @@ pub struct CreateWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWorkteamResponse { ///

The Amazon Resource Name (ARN) of the work team. You can use this ARN to identify the work team.

#[serde(rename = "WorkteamArn")] @@ -865,9 +888,30 @@ pub struct CreateWorkteamResponse { pub workteam_arn: Option, } +///

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DataProcessing { + ///

A JSONPath expression used to select a portion of the input data to pass to the algorithm. Use the InputFilter parameter to exclude fields, such as an ID column, from the input. If you want Amazon SageMaker to pass the entire input dataset to the algorithm, accept the default value $.

Examples: "$", "$[1:]", "$.features"

+ #[serde(rename = "InputFilter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub input_filter: Option, + ///

Specifies the source of the data to join with the transformed data. The valid values are None and Input. The default value is None, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input.

For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.

+ #[serde(rename = "JoinSource")] + #[serde(skip_serializing_if = "Option::is_none")] + pub join_source: Option, + ///

A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job. If you want Amazon SageMaker to store the entire input dataset in the output file, leave the default value, $. If you specify indexes that aren't within the dimension size of the joined dataset, you get an error.

Examples: "$", "$[0,5:]", "$['id','SageMakerOutput']"

+ #[serde(rename = "OutputFilter")] + #[serde(skip_serializing_if = "Option::is_none")] + pub output_filter: Option, +} + ///

Describes the location of the channel data.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DataSource { + ///

The file system that is associated with a channel.

+ #[serde(rename = "FileSystemDataSource")] + #[serde(skip_serializing_if = "Option::is_none")] + pub file_system_data_source: Option, ///

The S3 location of the data source that is associated with a channel.

#[serde(rename = "S3DataSource")] #[serde(skip_serializing_if = "Option::is_none")] @@ -941,7 +985,7 @@ pub struct DeleteTagsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTagsOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -952,16 +996,16 @@ pub struct DeleteWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWorkteamResponse { ///

Returns true if the work team was successfully deleted; otherwise, returns false.

#[serde(rename = "Success")] pub success: bool, } -///

Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.

If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.

+///

Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.

If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeployedImage { ///

The date and time when the image path for the model resolved to the ResolvedImage

#[serde(rename = "ResolutionTime")] @@ -985,7 +1029,7 @@ pub struct DescribeAlgorithmInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAlgorithmOutput { ///

The Amazon Resource Name (ARN) of the algorithm.

#[serde(rename = "AlgorithmArn")] @@ -1035,7 +1079,7 @@ pub struct DescribeCodeRepositoryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCodeRepositoryOutput { ///

The Amazon Resource Name (ARN) of the Git repository.

#[serde(rename = "CodeRepositoryArn")] @@ -1063,7 +1107,7 @@ pub struct DescribeCompilationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCompilationJobResponse { ///

The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker detected that the job failed.

#[serde(rename = "CompilationEndTime")] @@ -1103,7 +1147,7 @@ pub struct DescribeCompilationJobResponse { ///

The Amazon Resource Name (ARN) of the model compilation job.

#[serde(rename = "RoleArn")] pub role_arn: String, - ///

The duration allowed for model compilation.

+ ///

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, } @@ -1116,7 +1160,7 @@ pub struct DescribeEndpointConfigInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointConfigOutput { ///

A timestamp that shows when the endpoint configuration was created.

#[serde(rename = "CreationTime")] @@ -1144,7 +1188,7 @@ pub struct DescribeEndpointInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEndpointOutput { ///

A timestamp that shows when the endpoint was created.

#[serde(rename = "CreationTime")] @@ -1182,7 +1226,7 @@ pub struct DescribeHyperParameterTuningJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeHyperParameterTuningJobResponse { ///

A TrainingJobSummary object that describes the training job that completed with the best current HyperParameterTuningJobObjective.

#[serde(rename = "BestTrainingJob")] @@ -1243,7 +1287,7 @@ pub struct DescribeLabelingJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeLabelingJobResponse { ///

The date and time that the labeling job was created.

#[serde(rename = "CreationTime")] @@ -1302,7 +1346,7 @@ pub struct DescribeLabelingJobResponse { #[serde(rename = "StoppingConditions")] #[serde(skip_serializing_if = "Option::is_none")] pub stopping_conditions: Option, - ///

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

+ ///

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1316,7 +1360,7 @@ pub struct DescribeModelInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeModelOutput { ///

The containers in the inference pipeline.

#[serde(rename = "Containers")] @@ -1356,7 +1400,7 @@ pub struct DescribeModelPackageInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeModelPackageOutput { ///

Whether the model package is certified for listing on AWS Marketplace.

#[serde(rename = "CertifyForMarketplace")] @@ -1410,7 +1454,7 @@ pub struct DescribeNotebookInstanceLifecycleConfigInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeNotebookInstanceLifecycleConfigOutput { ///

A timestamp that tells when the lifecycle configuration was created.

#[serde(rename = "CreationTime")] @@ -1439,13 +1483,13 @@ pub struct DescribeNotebookInstanceLifecycleConfigOutput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeNotebookInstanceOutput { - ///

A list of the Elastic Inference (EI) instance types associated with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

+ ///

A list of the Elastic Inference (EI) instance types associated with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

#[serde(rename = "AcceleratorTypes")] #[serde(skip_serializing_if = "Option::is_none")] pub accelerator_types: Option>, - ///

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "AdditionalCodeRepositories")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_code_repositories: Option>, @@ -1453,7 +1497,7 @@ pub struct DescribeNotebookInstanceOutput { #[serde(rename = "CreationTime")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_time: Option, - ///

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "DefaultCodeRepository")] #[serde(skip_serializing_if = "Option::is_none")] pub default_code_repository: Option, @@ -1531,7 +1575,7 @@ pub struct DescribeSubscribedWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSubscribedWorkteamResponse { ///

A Workteam instance that contains information about the work team.

#[serde(rename = "SubscribedWorkteam")] @@ -1546,18 +1590,29 @@ pub struct DescribeTrainingJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTrainingJobResponse { ///

Information about the algorithm used for training, and algorithm metadata.

#[serde(rename = "AlgorithmSpecification")] pub algorithm_specification: AlgorithmSpecification, + ///

The billable time in seconds.

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.

+ #[serde(rename = "BillableTimeInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub billable_time_in_seconds: Option, + #[serde(rename = "CheckpointConfig")] + #[serde(skip_serializing_if = "Option::is_none")] + pub checkpoint_config: Option, ///

A timestamp that indicates when the training job was created.

#[serde(rename = "CreationTime")] pub creation_time: f64, - ///

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

+ ///

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

#[serde(rename = "EnableInterContainerTrafficEncryption")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_inter_container_traffic_encryption: Option, + ///

A Boolean indicating whether managed spot training is enabled (True) or not (False).

+ #[serde(rename = "EnableManagedSpotTraining")] + #[serde(skip_serializing_if = "Option::is_none")] + pub enable_managed_spot_training: Option, ///

If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

#[serde(rename = "EnableNetworkIsolation")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1600,14 +1655,14 @@ pub struct DescribeTrainingJobResponse { #[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option, - ///

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

  • LaunchingMLInstances

  • PreparingTrainingStack

  • DownloadingTrainingImage

+ ///

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.

  • Interrupted - The job stopped because the managed spot training instances were interrupted.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

  • LaunchingMLInstances

  • PreparingTrainingStack

  • DownloadingTrainingImage

#[serde(rename = "SecondaryStatus")] pub secondary_status: String, ///

A history of all of the secondary statuses that the training job has transitioned through.

#[serde(rename = "SecondaryStatusTransitions")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_status_transitions: Option>, - ///

The condition under which to stop the training job.

+ ///

Specifies a limit to how long a model training job can run. It also specifies the maximum time to wait for a spot instance. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, ///

Indicates the time when the training job ends on training instances. You are billed for the time interval between the value of TrainingStartTime and this time. For successful jobs and stopped jobs, this is the time after model artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker detects a job failure.

@@ -1627,6 +1682,10 @@ pub struct DescribeTrainingJobResponse { #[serde(rename = "TrainingStartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub training_start_time: Option, + ///

The training time in seconds.

+ #[serde(rename = "TrainingTimeInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub training_time_in_seconds: Option, ///

The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by a hyperparameter tuning job.

#[serde(rename = "TuningJobArn")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1645,7 +1704,7 @@ pub struct DescribeTransformJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTransformJobResponse { ///

Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

To enable the batch strategy, you must set SplitType to Line, RecordIO, or TFRecord.

#[serde(rename = "BatchStrategy")] @@ -1654,11 +1713,14 @@ pub struct DescribeTransformJobResponse { ///

A timestamp that shows when the transform Job was created.

#[serde(rename = "CreationTime")] pub creation_time: f64, + #[serde(rename = "DataProcessing")] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_processing: Option, ///

The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.

#[serde(rename = "Environment")] #[serde(skip_serializing_if = "Option::is_none")] pub environment: Option<::std::collections::HashMap>, - ///

If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch.

+ ///

If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch.

#[serde(rename = "FailureReason")] #[serde(skip_serializing_if = "Option::is_none")] pub failure_reason: Option, @@ -1714,7 +1776,7 @@ pub struct DescribeWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkteamResponse { ///

A Workteam instance that contains information about the work team.

#[serde(rename = "Workteam")] @@ -1739,7 +1801,7 @@ pub struct DesiredWeightAndCapacity { ///

Provides summary information for an endpoint configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EndpointConfigSummary { ///

A timestamp that shows when the endpoint configuration was created.

#[serde(rename = "CreationTime")] @@ -1754,7 +1816,7 @@ pub struct EndpointConfigSummary { ///

Provides summary information for an endpoint.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EndpointSummary { ///

A timestamp that shows when the endpoint was created.

#[serde(rename = "CreationTime")] @@ -1773,6 +1835,23 @@ pub struct EndpointSummary { pub last_modified_time: f64, } +///

Specifies a file system data source for a channel.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct FileSystemDataSource { + ///

The full path to the directory to associate with the channel.

+ #[serde(rename = "DirectoryPath")] + pub directory_path: String, + ///

The access mode of the mount of the directory associated with the channel. A directory can be mounted either in ro (read-only) or rw (read-write).

+ #[serde(rename = "FileSystemAccessMode")] + pub file_system_access_mode: String, + ///

The file system id.

+ #[serde(rename = "FileSystemId")] + pub file_system_id: String, + ///

The file system type.

+ #[serde(rename = "FileSystemType")] + pub file_system_type: String, +} + ///

A conditional statement for a search expression that includes a Boolean operator, a resource property, and a value.

If you don't specify an Operator and a Value, the filter searches for only the specified property. For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.

If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator as the default.

In search, there are several property types:

Metrics

To define a metric filter, enter a value using the form "Metrics.<name>", where <name> is a metric name. For example, the following filter searches for training jobs with an "accuracy" metric greater than "0.9":

{

"Name": "Metrics.accuracy",

"Operator": "GREATERTHAN",

"Value": "0.9"

}

HyperParameters

To define a hyperparameter filter, enter a value with the form "HyperParameters.<name>". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a "learningrate" hyperparameter that is less than "0.5":

{

"Name": "HyperParameters.learningrate",

"Operator": "LESSTHAN",

"Value": "0.5"

}

Tags

To define a tag filter, enter a value with the form "Tags.<key>".

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Filter { @@ -1791,7 +1870,7 @@ pub struct Filter { ///
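The metric filter from the JSON example above, expressed with this Filter struct; Operator and Value stay optional per the defaulting rules in the doc comment. "GreaterThan" is an assumption for the wire value (the stripped doc above renders it as GREATERTHAN), and the snake_case field names follow rusoto's codegen conventions:

let filter = Filter {
    // Match training jobs on their "accuracy" metric.
    name: "Metrics.accuracy".to_string(),
    operator: Some("GreaterThan".to_string()),
    value: Some("0.9".to_string()),
};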

Shows the final value for the objective metric for a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FinalHyperParameterTuningJobObjectiveMetric { ///

The name of the objective metric.

#[serde(rename = "MetricName")] @@ -1817,7 +1896,7 @@ pub struct GetSearchSuggestionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSearchSuggestionsResponse { ///

A list of property names for a Resource that match a SuggestionQuery.

#[serde(rename = "PropertyNameSuggestions")] @@ -1863,14 +1942,14 @@ pub struct HumanTaskConfig { ///

The number of human workers that will label an object.

#[serde(rename = "NumberOfHumanWorkersPerDataObject")] pub number_of_human_workers_per_data_object: i64, - ///

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

US East (Ohio) (us-east-2):

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

US West (Oregon) (us-west-2):

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

EU (Ireland) (eu-west-1):

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

Asia Pacific (Tokyo) (ap-northeast-1):

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

Asia Pacific (Sydney) (ap-southeast-2):

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

+ ///

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition

US East (Ohio) (us-east-2):

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition

US West (Oregon) (us-west-2):

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition

Canada (Central) (ca-central-1):

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition

EU (Ireland) (eu-west-1):

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition

EU (London) (eu-west-2):

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition

EU (Frankfurt) (eu-central-1):

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition

Asia Pacific (Tokyo) (ap-northeast-1):

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition

Asia Pacific (Seoul) (ap-northeast-2):

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition

Asia Pacific (Mumbai) (ap-south-1):

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition

Asia Pacific (Singapore) (ap-southeast-1):

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition

Asia Pacific (Sydney) (ap-southeast-2):

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition

#[serde(rename = "PreHumanTaskLambdaArn")] pub pre_human_task_lambda_arn: String, - ///

The price that you pay for each task performed by a public worker.

+ ///

The price that you pay for each task performed by an Amazon Mechanical Turk worker.

#[serde(rename = "PublicWorkforceTaskPrice")] #[serde(skip_serializing_if = "Option::is_none")] pub public_workforce_task_price: Option, - ///

The length of time that a task remains available for labelling by human workers.

+ ///

The length of time that a task remains available for labeling by human workers. If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43200). For private and vendor workforces, the maximum is as listed.

#[serde(rename = "TaskAvailabilityLifetimeInSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub task_availability_lifetime_in_seconds: Option, @@ -1952,10 +2031,17 @@ pub struct HyperParameterTrainingJobDefinition { ///

The HyperParameterAlgorithmSpecification object that specifies the resource algorithm to use for the training jobs that the tuning job launches.

#[serde(rename = "AlgorithmSpecification")] pub algorithm_specification: HyperParameterAlgorithmSpecification, + #[serde(rename = "CheckpointConfig")] + #[serde(skip_serializing_if = "Option::is_none")] + pub checkpoint_config: Option, ///

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

#[serde(rename = "EnableInterContainerTrafficEncryption")] #[serde(skip_serializing_if = "Option::is_none")] pub enable_inter_container_traffic_encryption: Option, + ///

A Boolean indicating whether managed spot training is enabled (True) or not (False).

+ #[serde(rename = "EnableManagedSpotTraining")] + #[serde(skip_serializing_if = "Option::is_none")] + pub enable_managed_spot_training: Option, ///

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

#[serde(rename = "EnableNetworkIsolation")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1977,7 +2063,7 @@ pub struct HyperParameterTrainingJobDefinition { #[serde(rename = "StaticHyperParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub static_hyper_parameters: Option<::std::collections::HashMap>, - ///

Sets a maximum duration for the training jobs that the tuning job launches. Use this parameter to limit model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.

When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job.

+ ///

Specifies a limit to how long a model hyperparameter training job can run. It also specifies how long you are willing to wait for a managed spot training job to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, ///

The VpcConfig object that specifies the VPC that you want the training jobs that this hyperparameter tuning job launches to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud.

@@ -1988,7 +2074,7 @@ pub struct HyperParameterTrainingJobDefinition { ///

Specifies summary information about a training job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HyperParameterTrainingJobSummary { ///

The date and time that the training job was created.

#[serde(rename = "CreationTime")] @@ -2046,10 +2132,10 @@ pub struct HyperParameterTuningJobConfig { ///

The ResourceLimits object that specifies the maximum number of training jobs and parallel training jobs for this tuning job.

#[serde(rename = "ResourceLimits")] pub resource_limits: ResourceLimits, - ///

Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches. To use the Bayesian search strategy, set this to Bayesian. To randomly search, set it to Random. For information about search strategies, see How Hyperparameter Tuning Works.

+ ///

Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches. To use the Bayesian search strategy, set this to Bayesian. To randomly search, set it to Random. For information about search strategies, see How Hyperparameter Tuning Works.

#[serde(rename = "Strategy")] pub strategy: String, - ///

Specifies whether to use early stopping for training jobs launched by the hyperparameter tuning job. This can be one of the following values (the default value is OFF):

OFF

Training jobs launched by the hyperparameter tuning job do not use early stopping.

AUTO

Amazon SageMaker stops training jobs launched by the hyperparameter tuning job when they are unlikely to perform better than previously completed training jobs. For more information, see Stop Training Jobs Early.

+ ///

Specifies whether to use early stopping for training jobs launched by the hyperparameter tuning job. This can be one of the following values (the default value is OFF):

OFF

Training jobs launched by the hyperparameter tuning job do not use early stopping.

AUTO

Amazon SageMaker stops training jobs launched by the hyperparameter tuning job when they are unlikely to perform better than previously completed training jobs. For more information, see Stop Training Jobs Early.

#[serde(rename = "TrainingJobEarlyStoppingType")] #[serde(skip_serializing_if = "Option::is_none")] pub training_job_early_stopping_type: Option, @@ -2068,7 +2154,7 @@ pub struct HyperParameterTuningJobObjective { ///

Provides summary information about a hyperparameter tuning job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HyperParameterTuningJobSummary { ///

The date and time that the tuning job was created.

#[serde(rename = "CreationTime")] @@ -2108,7 +2194,7 @@ pub struct HyperParameterTuningJobSummary { ///

Specifies the configuration for a hyperparameter tuning job that uses one or more previous hyperparameter tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job.

All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric, and the training job that performs the best is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job.

All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct HyperParameterTuningJobWarmStartConfig { - ///

An array of hyperparameter tuning jobs that are used as the starting point for the new hyperparameter tuning job. For more information about warm starting a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job as a Starting Point.

Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent jobs for warm start tuning jobs.

+ ///

An array of hyperparameter tuning jobs that are used as the starting point for the new hyperparameter tuning job. For more information about warm starting a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job as a Starting Point.

Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent jobs for warm start tuning jobs.

#[serde(rename = "ParentHyperParameterTuningJobs")] pub parent_hyper_parameter_tuning_jobs: Vec, ///

Specifies one of the following:

IDENTICALDATAANDALGORITHM

The new hyperparameter tuning job uses the same input data and training image as the parent tuning jobs. You can change the hyperparameter ranges to search and the maximum number of training jobs that the hyperparameter tuning job launches. You cannot use a new version of the training algorithm, unless the changes in the new version do not affect the algorithm itself. For example, changes that improve logging or adding support for a different data format are allowed. You can also change hyperparameters from tunable to static, and from static to tunable, but the total number of static plus tunable hyperparameters must remain the same as it is in all parent jobs. The objective metric for the new tuning job must be the same as for all parent jobs.

TRANSFERLEARNING

The new hyperparameter tuning job can include input data, hyperparameter ranges, maximum number of concurrent training jobs, and maximum number of training jobs that are different than those of its parent hyperparameter tuning jobs. The training image can also be a different version from the version used in the parent hyperparameter tuning job. You can also change hyperparameters from tunable to static, and from static to tunable, but the total number of static plus tunable hyperparameters must remain the same as it is in all parent jobs. The objective metric for the new tuning job must be the same as for all parent jobs.
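A sketch of a transfer-learning warm start built from this struct. The parent job name is an illustrative assumption, ParentHyperParameterTuningJob's single name field is assumed from the service API, and "TransferLearning" is assumed to be the wire value behind the TRANSFERLEARNING option described above:

let warm_start = HyperParameterTuningJobWarmStartConfig {
    parent_hyper_parameter_tuning_jobs: vec![ParentHyperParameterTuningJob {
        hyper_parameter_tuning_job_name: Some("previous-tuning-job".to_string()),
    }],
    // Permit new input data and hyperparameter ranges relative to the parents.
    warm_start_type: "TransferLearning".to_string(),
};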

@@ -2162,7 +2248,7 @@ pub struct IntegerParameterRange { ///

The name of the hyperparameter to search.

#[serde(rename = "Name")] pub name: String, - ///

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

+ ///

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.
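A hedged sketch of a range using these scaling values; the hyperparameter name and bounds are illustrative, and note that MinValue/MaxValue are strings in this API:

```rust
use rusoto_sagemaker::IntegerParameterRange;

// Let SageMaker choose the best search scale for an integer hyperparameter.
fn layer_count_range() -> IntegerParameterRange {
    IntegerParameterRange {
        name: "num_layers".to_string(),
        min_value: "2".to_string(),
        max_value: "10".to_string(),
        scaling_type: Some("Auto".to_string()),
    }
}
```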

#[serde(rename = "ScalingType")] #[serde(skip_serializing_if = "Option::is_none")] pub scaling_type: Option, @@ -2181,7 +2267,7 @@ pub struct IntegerParameterRangeSpecification { ///

Provides a breakdown of the number of objects labeled.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelCounters { ///
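The cfg_attr change above gates Serialize on either test builds or the new serialize_structs Cargo feature. A minimal sketch of what that enables downstream (feature name taken from this diff; the serde_json call is an assumed consumer):

```rust
use rusoto_sagemaker::LabelCounters;

// Compiles outside of tests only when rusoto_sagemaker is built with
// features = ["serialize_structs"], which activates the derived Serialize.
fn counters_as_json(counters: &LabelCounters) -> String {
    serde_json::to_string(counters).expect("LabelCounters should serialize")
}
```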

The total number of objects that could not be labeled due to an error.

#[serde(rename = "FailedNonRetryableError")] @@ -2207,7 +2293,7 @@ pub struct LabelCounters { ///

Provides counts for human-labeled tasks in the labeling job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelCountersForWorkteam { ///

The total number of data objects labeled by a human worker.

#[serde(rename = "HumanLabeled")] @@ -2230,7 +2316,7 @@ pub struct LabelingJobAlgorithmsConfig { #[serde(rename = "InitialActiveLearningModelArn")] #[serde(skip_serializing_if = "Option::is_none")] pub initial_active_learning_model_arn: Option, - ///

Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs:

  • Image classification

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification

  • Text classification

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification

  • Object detection

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection

+ ///

Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs (a construction sketch follows the list):

  • Image classification

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification

  • Text classification

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification

  • Object detection

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection

  • Semantic Segmentation

    arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation
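The ARNs above differ only in the Region segment, so a small helper can assemble them; the account ID 027400017018 is fixed in the list above, while the region and task-suffix arguments are illustrative:

```rust
// Builds one of the documented auto-labeling algorithm ARNs, e.g.
// auto_labeling_arn("us-west-2", "image-classification").
fn auto_labeling_arn(region: &str, task: &str) -> String {
    format!(
        "arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/{}",
        region, task
    )
}
```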

#[serde(rename = "LabelingJobAlgorithmSpecificationArn")] pub labeling_job_algorithm_specification_arn: String, ///

Provides configuration information for a labeling job.

@@ -2258,7 +2344,7 @@ pub struct LabelingJobDataSource { ///

Provides summary information for a work team.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelingJobForWorkteamSummary { ///

The date and time that the labeling job was created.

#[serde(rename = "CreationTime")] @@ -2297,7 +2383,7 @@ pub struct LabelingJobInputConfig { ///

Specifies the location of the output produced by the labeling job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelingJobOutput { ///

The Amazon Resource Name (ARN) for the most recent Amazon SageMaker model trained as part of automated data labeling.

#[serde(rename = "FinalActiveLearningModelArn")] @@ -2311,7 +2397,7 @@ pub struct LabelingJobOutput { ///

Output configuration information for a labeling job.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct LabelingJobOutputConfig { - ///

The AWS Key Management Service ID of the key used to encrypt the output data, if any.

+ ///

The AWS Key Management Service ID of the key used to encrypt the output data, if any.

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for LabelingJobOutputConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to "aws:kms". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateLabelingJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, @@ -2323,7 +2409,7 @@ pub struct LabelingJobOutputConfig { ///

Provides configuration information for labeling jobs.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct LabelingJobResourceConfig { - ///

The AWS Key Management Service key ID for the key used to encrypt the output data, if any.

+ ///

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:

  • // KMS Key ID

    "1234abcd-12ab-34cd-56ef-1234567890ab"

  • // Amazon Resource Name (ARN) of a KMS Key

    "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"

#[serde(rename = "VolumeKmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub volume_kms_key_id: Option, @@ -2352,9 +2438,9 @@ pub struct LabelingJobStoppingConditions { ///

Provides summary information about a labeling job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelingJobSummary { - ///

The Amazon Resource Name (ARN) of the Lambda function used to consolidate the annotations from individual workers into a label for a data object. For more information, see Annotation Consolidation.

+ ///

The Amazon Resource Name (ARN) of the Lambda function used to consolidate the annotations from individual workers into a label for a data object. For more information, see Annotation Consolidation.

#[serde(rename = "AnnotationConsolidationLambdaArn")] #[serde(skip_serializing_if = "Option::is_none")] pub annotation_consolidation_lambda_arn: Option, @@ -2429,7 +2515,7 @@ pub struct ListAlgorithmsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAlgorithmsOutput { ///

An array of AlgorithmSummary objects, each of which lists an algorithm.

#[serde(rename = "AlgorithmSummaryList")] @@ -2481,7 +2567,7 @@ pub struct ListCodeRepositoriesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCodeRepositoriesOutput { ///

Gets a list of summaries of the Git repositories. Each summary specifies the following values for the repository:

  • Name

  • Amazon Resource Name (ARN)

  • Creation time

  • Last modified time

  • Configuration information, including the URL location of the repository and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

#[serde(rename = "CodeRepositorySummaryList")] @@ -2537,7 +2623,7 @@ pub struct ListCompilationJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCompilationJobsResponse { ///

An array of CompilationJobSummary objects, each describing a model compilation job.

#[serde(rename = "CompilationJobSummaries")] @@ -2581,7 +2667,7 @@ pub struct ListEndpointConfigsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEndpointConfigsOutput { ///

An array of endpoint configurations.

#[serde(rename = "EndpointConfigs")] @@ -2637,7 +2723,7 @@ pub struct ListEndpointsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListEndpointsOutput { ///

An array of endpoint objects.

#[serde(rename = "Endpoints")] @@ -2693,7 +2779,7 @@ pub struct ListHyperParameterTuningJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListHyperParameterTuningJobsResponse { ///

A list of HyperParameterTuningJobSummary objects that describe the tuning jobs that the ListHyperParameterTuningJobs request returned.

#[serde(rename = "HyperParameterTuningJobSummaries")] @@ -2740,7 +2826,7 @@ pub struct ListLabelingJobsForWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLabelingJobsForWorkteamResponse { ///

An array of LabelingJobSummary objects, each describing a labeling job.

#[serde(rename = "LabelingJobSummaryList")] @@ -2796,7 +2882,7 @@ pub struct ListLabelingJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLabelingJobsResponse { ///

An array of LabelingJobSummary objects, each describing a labeling job.

#[serde(rename = "LabelingJobSummaryList")] @@ -2841,7 +2927,7 @@ pub struct ListModelPackagesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListModelPackagesOutput { ///

An array of ModelPackageSummary objects, each of which lists a model package.

#[serde(rename = "ModelPackageSummaryList")] @@ -2885,7 +2971,7 @@ pub struct ListModelsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListModelsOutput { ///

An array of ModelSummary objects, each of which lists a model.

#[serde(rename = "Models")] @@ -2937,7 +3023,7 @@ pub struct ListNotebookInstanceLifecycleConfigsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNotebookInstanceLifecycleConfigsOutput { ///

If the response is truncated, Amazon SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request.

#[serde(rename = "NextToken")] @@ -3006,7 +3092,7 @@ pub struct ListNotebookInstancesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNotebookInstancesOutput { ///

If the response to the previous ListNotebookInstances request was truncated, Amazon SageMaker returns this token. To retrieve the next set of notebook instances, use the token in the next request.

#[serde(rename = "NextToken")] @@ -3035,7 +3121,7 @@ pub struct ListSubscribedWorkteamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSubscribedWorkteamsResponse { ///

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of work teams, use it in the subsequent request.

#[serde(rename = "NextToken")] @@ -3062,7 +3148,7 @@ pub struct ListTagsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsOutput { ///

If the response is truncated, Amazon SageMaker includes a token in the response. You can use this token in your subsequent request to fetch the next set of tags.

#[serde(rename = "NextToken")] @@ -3102,7 +3188,7 @@ pub struct ListTrainingJobsForHyperParameterTuningJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTrainingJobsForHyperParameterTuningJobResponse { ///

If the result of this ListTrainingJobsForHyperParameterTuningJob request was truncated, the response includes a NextToken. To retrieve the next set of training jobs, use the token in the next request.

#[serde(rename = "NextToken")] @@ -3158,7 +3244,7 @@ pub struct ListTrainingJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTrainingJobsResponse { ///

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of training jobs, use it in the subsequent request.
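A hedged pagination sketch using this token; it assumes the crate's SageMakerClient and the synchronous .sync() helper available on RusotoFuture in this era of the crate:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{ListTrainingJobsRequest, SageMaker, SageMakerClient};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = SageMakerClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        // Pass the token from the previous page; None requests the first page.
        let page = client
            .list_training_jobs(ListTrainingJobsRequest {
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()?;
        for job in page.training_job_summaries {
            println!("{}", job.training_job_name);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(())
}
```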

#[serde(rename = "NextToken")] @@ -3214,7 +3300,7 @@ pub struct ListTransformJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTransformJobsResponse { ///

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of transform jobs, use it in the next request.

#[serde(rename = "NextToken")] @@ -3250,7 +3336,7 @@ pub struct ListWorkteamsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWorkteamsResponse { ///

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of work teams, use it in the subsequent request.

#[serde(rename = "NextToken")] @@ -3272,7 +3358,7 @@ pub struct MemberDefinition { ///

The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MetricData { ///

The name of the metric.

#[serde(rename = "MetricName")] @@ -3288,7 +3374,7 @@ pub struct MetricData { pub value: Option, } -///

Specifies a metric that the training algorithm writes to stderr or stdout. Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.

+///

Specifies a metric that the training algorithm writes to stderr or stdout. Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.
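A hedged sketch of one such definition; the metric name and the regex that extracts it from the algorithm's log output are illustrative:

```rust
use rusoto_sagemaker::MetricDefinition;

// Captures lines like "accuracy = 0.93" written by the training container.
fn objective_metric() -> MetricDefinition {
    MetricDefinition {
        name: "validation:accuracy".to_string(),
        regex: "accuracy = ([0-9\\.]+)".to_string(),
    }
}
```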

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MetricDefinition { ///

The name of the metric.

@@ -3301,7 +3387,7 @@ pub struct MetricDefinition { ///

Provides information about the location that is configured for storing model artifacts.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModelArtifacts { ///

The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

#[serde(rename = "S3ModelArtifacts")] @@ -3334,7 +3420,7 @@ pub struct ModelPackageContainerDefinition { ///

Specifies the validation and image scan statuses of the model package.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModelPackageStatusDetails { ///

The status of the scan of the Docker image container for the model package.

#[serde(rename = "ImageScanStatuses")] @@ -3347,7 +3433,7 @@ pub struct ModelPackageStatusDetails { ///

Represents the overall status of a model package.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModelPackageStatusItem { ///

If the overall status is Failed, the reason for the failure.

#[serde(rename = "FailureReason")] @@ -3363,7 +3449,7 @@ pub struct ModelPackageStatusItem { ///

Provides summary information about a model package.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModelPackageSummary { ///

A timestamp that shows when the model package was created.

#[serde(rename = "CreationTime")] @@ -3407,7 +3493,7 @@ pub struct ModelPackageValidationSpecification { ///

Provides summary information about a model.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModelSummary { ///

A timestamp that indicates when the model was created.

#[serde(rename = "CreationTime")] @@ -3426,14 +3512,14 @@ pub struct NestedFilters { ///

A list of filters. Each filter acts on a property. Filters must contain at least one Filters value. For example, a NestedFilters call might include a filter on the PropertyName parameter of the InputDataConfig property: InputDataConfig.DataSource.S3DataSource.S3Uri.

#[serde(rename = "Filters")] pub filters: Vec, - ///

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig.

+ ///

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig.
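A hedged sketch of a nested filter rooted at such a property name; the operator string and S3 prefix are illustrative:

```rust
use rusoto_sagemaker::{Filter, NestedFilters};

// Match training jobs whose input channel reads from a given S3 prefix.
fn input_uri_filter(prefix: &str) -> NestedFilters {
    NestedFilters {
        nested_property_name: "InputDataConfig".to_string(),
        filters: vec![Filter {
            name: "InputDataConfig.DataSource.S3DataSource.S3Uri".to_string(),
            operator: Some("Contains".to_string()),
            value: Some(prefix.to_string()),
        }],
    }
}
```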

#[serde(rename = "NestedPropertyName")] pub nested_property_name: String, } ///

Provides a summary of a notebook instance lifecycle configuration.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotebookInstanceLifecycleConfigSummary { ///

A timestamp that tells when the lifecycle configuration was created.

#[serde(rename = "CreationTime")] @@ -3462,9 +3548,9 @@ pub struct NotebookInstanceLifecycleHook { ///

Provides summary information for an Amazon SageMaker notebook instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NotebookInstanceSummary { - ///

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "AdditionalCodeRepositories")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_code_repositories: Option>, @@ -3472,7 +3558,7 @@ pub struct NotebookInstanceSummary { #[serde(rename = "CreationTime")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_time: Option, - ///

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "DefaultCodeRepository")] #[serde(skip_serializing_if = "Option::is_none")] pub default_code_repository: Option, @@ -3515,7 +3601,7 @@ pub struct NotificationConfiguration { ///

Specifies the number of training jobs that this hyperparameter tuning job launched, categorized by the status of their objective metric. The objective metric status shows whether the final objective metric for the training job has been evaluated by the tuning job and used in the hyperparameter tuning process.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ObjectiveStatusCounters { ///

The number of training jobs whose final objective metric was not evaluated and used in the hyperparameter tuning process. This typically occurs when the training job failed or did not emit an objective metric.

#[serde(rename = "Failed")] @@ -3545,7 +3631,7 @@ pub struct OutputConfig { ///

Provides information about how to store model training results (model artifacts).

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct OutputDataConfig { - ///

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

  • // KMS Key ID

    "1234abcd-12ab-34cd-56ef-1234567890ab"

  • // Amazon Resource Name (ARN) of a KMS Key

    "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"

  • // KMS Key Alias

    "alias/ExampleAlias"

  • // Amazon Resource Name (ARN) of a KMS Key Alias

    "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTramsformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

+ ///

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

  • // KMS Key ID

    "1234abcd-12ab-34cd-56ef-1234567890ab"

  • // Amazon Resource Name (ARN) of a KMS Key

    "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"

  • // KMS Key Alias

    "alias/ExampleAlias"

  • // Amazon Resource Name (ARN) of a KMS Key Alias

    "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to "aws:kms". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.
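A hedged sketch using one of the four key formats listed above; the alias and bucket path are illustrative:

```rust
use rusoto_sagemaker::OutputDataConfig;

// Encrypt model artifacts with a customer-managed key referenced by alias.
fn encrypted_output() -> OutputDataConfig {
    OutputDataConfig {
        kms_key_id: Some("alias/ExampleAlias".to_string()),
        s3_output_path: "s3://my-bucket/model-artifacts/".to_string(),
    }
}
```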

#[serde(rename = "KmsKeyId")] #[serde(skip_serializing_if = "Option::is_none")] pub kms_key_id: Option, @@ -3600,7 +3686,7 @@ pub struct ParentHyperParameterTuningJob { ///

Identifies a model that you want to host and the resources to deploy for hosting it. If you are deploying multiple models, tell Amazon SageMaker how to distribute traffic among the models by specifying variant weights.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProductionVariant { - ///

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker. For more information, see Using Elastic Inference in Amazon SageMaker.

+ ///

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

#[serde(rename = "AcceleratorType")] #[serde(skip_serializing_if = "Option::is_none")] pub accelerator_type: Option, @@ -3624,7 +3710,7 @@ pub struct ProductionVariant { ///

Describes weight and capacities for a production variant associated with an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities API and the endpoint status is Updating, you get different desired and current values.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProductionVariantSummary { ///

The number of instances associated with the variant.

#[serde(rename = "CurrentInstanceCount")] @@ -3661,7 +3747,7 @@ pub struct PropertyNameQuery { ///

A property name returned from a GetSearchSuggestions call that specifies a value in the PropertyNameQuery field.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PropertyNameSuggestion { ///

A suggested property name based on what you entered in the search textbox in the Amazon SageMaker console.

#[serde(rename = "PropertyName")] @@ -3669,10 +3755,10 @@ pub struct PropertyNameSuggestion { pub property_name: Option, } -///

Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.

Use one of the following prices for bounding box tasks. Prices are in US dollars.

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for image classification, text classification, and custom tasks. Prices are in US dollars.

  • 0.012

  • 0.024

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for semantic segmentation tasks. Prices are in US dollars.

  • 0.840

  • 0.960

  • 1.080

  • 1.200

+///

Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.

Use one of the following prices for bounding box tasks. Prices are in US dollars and should be based on the complexity of the task; the longer it takes in your initial testing, the more you should offer.

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for image classification, text classification, and custom tasks. Prices are in US dollars.

  • 0.012

  • 0.024

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for semantic segmentation tasks. Prices are in US dollars.

  • 0.840

  • 0.960

  • 1.080

  • 1.200

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PublicWorkforceTaskPrice { - ///

Defines the amount of money paid to a worker in United States dollars.

+ ///

Defines the amount of money paid to an Amazon Mechanical Turk worker in United States dollars.
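A hedged sketch of one of the bounding-box prices listed above, split into the USD struct's dollar, cent, and tenth-of-a-cent parts:

```rust
use rusoto_sagemaker::{PublicWorkforceTaskPrice, USD};

// $0.036 per task = 0 dollars + 3 cents + 6 tenth-fractions of a cent.
fn bounding_box_price() -> PublicWorkforceTaskPrice {
    PublicWorkforceTaskPrice {
        amount_in_usd: Some(USD {
            dollars: Some(0),
            cents: Some(3),
            tenth_fractions_of_a_cent: Some(6),
        }),
    }
}
```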

#[serde(rename = "AmountInUsd")] #[serde(skip_serializing_if = "Option::is_none")] pub amount_in_usd: Option, @@ -3692,7 +3778,7 @@ pub struct RenderUiTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RenderUiTemplateResponse { ///

A list of one or more RenderingError objects if any were encountered while rendering the template. If there were no errors, the list is empty.

#[serde(rename = "Errors")] @@ -3712,7 +3798,7 @@ pub struct RenderableTask { ///

A description of an error that occurred while rendering the template.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RenderingError { ///

A unique identifier for a specific class of errors.

#[serde(rename = "Code")] @@ -3793,7 +3879,7 @@ pub struct SearchExpression { ///

An individual search result record that contains a single resource object.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchRecord { ///

A TrainingJob object that is returned as part of a Search request.

#[serde(rename = "TrainingJob")] @@ -3829,7 +3915,7 @@ pub struct SearchRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchResponse { ///

If the result of the previous Search request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request.

#[serde(rename = "NextToken")] @@ -3843,7 +3929,7 @@ pub struct SearchResponse { ///

An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides additional details about a status that the training job has transitioned through. A training job can be in one of several states, for example, starting, downloading, training, or uploading. Within each state, there are a number of intermediate states. For example, within the starting state, Amazon SageMaker could be starting the training job or launching the ML instances. These transitional states are referred to as the job's secondary status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecondaryStatusTransition { ///

A timestamp that shows when the training job transitioned out of this secondary status state into another secondary status state or when the training job has ended.

#[serde(rename = "EndTime")] @@ -3855,7 +3941,7 @@ pub struct SecondaryStatusTransition { ///

Contains secondary status information from a training job.

Status might be one of the following secondary statuses:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

We no longer support the following secondary statuses:

  • LaunchingMLInstances

  • PreparingTrainingStack

  • DownloadingTrainingImage

#[serde(rename = "Status")] pub status: String, - ///

A detailed description of the progress within a secondary status.

Amazon SageMaker provides secondary statuses and status messages that apply to each of them:

Starting
  • Starting the training job.

  • Launching requested ML instances.

  • Insufficient capacity error from EC2 while launching instances, retrying!

  • Launched instance was unhealthy, replacing it!

  • Preparing the instances for training.

Training
  • Downloading the training image.

  • Training image download completed. Training in progress.

Status messages are subject to change. Therefore, we recommend not including them in code that programmatically initiates actions. For examples, don't use status messages in if statements.

To have an overview of your training job's progress, view TrainingJobStatus and SecondaryStatus in DescribeTrainingJobResponse, and StatusMessage together. For example, at the start of a training job, you might see the following:

  • TrainingJobStatus - InProgress

  • SecondaryStatus - Training

  • StatusMessage - Downloading the training image

+ ///

A detailed description of the progress within a secondary status.

Amazon SageMaker provides secondary statuses and status messages that apply to each of them:

Starting
  • Starting the training job.

  • Launching requested ML instances.

  • Insufficient capacity error from EC2 while launching instances, retrying!

  • Launched instance was unhealthy, replacing it!

  • Preparing the instances for training.

Training
  • Downloading the training image.

  • Training image download completed. Training in progress.

Status messages are subject to change. Therefore, we recommend not including them in code that programmatically initiates actions. For example, don't use status messages in if statements; branch on the documented status values instead (see the sketch after this list).

To have an overview of your training job's progress, view TrainingJobStatus and SecondaryStatus in DescribeTrainingJob, and StatusMessage together. For example, at the start of a training job, you might see the following:

  • TrainingJobStatus - InProgress

  • SecondaryStatus - Training

  • StatusMessage - Downloading the training image
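The sketch referenced above: branch on the documented SecondaryStatus values rather than the free-form StatusMessage, which may change; the chosen set of in-progress statuses is illustrative:

```rust
// True while the job is still making forward progress, per the InProgress
// secondary statuses documented above. Never compare against StatusMessage.
fn is_still_training(secondary_status: &str) -> bool {
    match secondary_status {
        "Starting" | "Downloading" | "Training" => true,
        _ => false,
    }
}
```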

#[serde(rename = "StatusMessage")] #[serde(skip_serializing_if = "Option::is_none")] pub status_message: Option, @@ -3938,18 +4024,22 @@ pub struct StopTransformJobRequest { pub transform_job_name: String, } -///

Specifies how long model training can run. When model training reaches the limit, Amazon SageMaker ends the training job. Use this API to cap model training cost.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of training is not lost.

Training algorithms provided by Amazon SageMaker automatically saves the intermediate results of a model training job (it is best effort case, as model might not be ready to save as some stages, for example training just started). This intermediate data is a valid model artifact. You can use it to create a model (CreateModel).

+///

Specifies a limit to how long a model training or compilation job can run. It also specifies how long you are willing to wait for a managed spot training job to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best-effort case, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StoppingCondition { - ///

The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 28 days.

+ ///

The maximum length of time, in seconds, that the training or compilation job can run. If the job does not complete during this time, Amazon SageMaker ends the job. If the value is not specified, the default value is 1 day. The maximum value is 28 days.

#[serde(rename = "MaxRuntimeInSeconds")] #[serde(skip_serializing_if = "Option::is_none")] pub max_runtime_in_seconds: Option, + ///

The maximum length of time, in seconds, that you are willing to wait for a managed spot training job to complete. This is the amount of time spent waiting for Spot capacity plus the amount of time the training job runs. It must be equal to or greater than MaxRuntimeInSeconds.
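A hedged sketch combining both limits from this diff; the one-hour/two-hour values are illustrative:

```rust
use rusoto_sagemaker::StoppingCondition;

// Run for at most one hour; wait up to two hours total for Spot capacity.
// MaxWaitTimeInSeconds must be >= MaxRuntimeInSeconds.
fn spot_stopping_condition() -> StoppingCondition {
    StoppingCondition {
        max_runtime_in_seconds: Some(3_600),
        max_wait_time_in_seconds: Some(7_200),
    }
}
```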

+ #[serde(rename = "MaxWaitTimeInSeconds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_wait_time_in_seconds: Option, } ///

Describes a work team of a vendor that does the labelling job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribedWorkteam { ///

#[serde(rename = "ListingId")] @@ -3994,7 +4084,7 @@ pub struct Tag { ///

Contains information about a training job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TrainingJob { ///

Information about the algorithm used for training, and algorithm metadata.

#[serde(rename = "AlgorithmSpecification")] @@ -4060,7 +4150,7 @@ pub struct TrainingJob { #[serde(rename = "SecondaryStatusTransitions")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_status_transitions: Option>, - ///

The condition under which to stop the training job.

+ ///

Specifies a limit to how long a model training job can run. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

#[serde(rename = "StoppingCondition")] #[serde(skip_serializing_if = "Option::is_none")] pub stopping_condition: Option, @@ -4114,7 +4204,7 @@ pub struct TrainingJobDefinition { ///

The resources, including the ML compute instances and ML storage volumes, to use for model training.

#[serde(rename = "ResourceConfig")] pub resource_config: ResourceConfig, - ///

Sets a duration for training. Use this parameter to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.

+ ///

Specifies a limit to how long a model training job can run. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts.

#[serde(rename = "StoppingCondition")] pub stopping_condition: StoppingCondition, ///

The input mode used by the algorithm for the training job. For the input modes that Amazon SageMaker algorithms support, see Algorithms.

If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.

@@ -4124,7 +4214,7 @@ pub struct TrainingJobDefinition { ///

The numbers of training jobs launched by a hyperparameter tuning job, categorized by status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TrainingJobStatusCounters { ///

The number of completed training jobs launched by the hyperparameter tuning job.

#[serde(rename = "Completed")] @@ -4150,7 +4240,7 @@ pub struct TrainingJobStatusCounters { ///

Provides summary information about a training job.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TrainingJobSummary { ///

A timestamp that shows when the training job was created.

#[serde(rename = "CreationTime")] @@ -4268,7 +4358,7 @@ pub struct TransformJobDefinition { ///

Provides a summary of a transform job. Multiple TransformJobSummary objects are returned as a list in response to a ListTransformJobs call.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TransformJobSummary { ///

A timestamp that shows when the transform job was created.

#[serde(rename = "CreationTime")] @@ -4319,10 +4409,10 @@ pub struct TransformOutput { ///

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TransformResources { - ///

The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1.

+ ///

The number of ML compute instances to use in the transform job. For distributed transform jobs, specify a value greater than 1. The default value is 1.

#[serde(rename = "InstanceCount")] pub instance_count: i64, - ///

The ML compute instance type for the transform job. For using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType.

+ ///

The ML compute instance type for the transform job. If you are using built-in algorithms to transform moderately sized datasets, we recommend using ml.m4.xlarge or ml.m5.large instance types.
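A hedged sketch following that recommendation; encryption is left off:

```rust
use rusoto_sagemaker::TransformResources;

// One moderately sized instance for a non-distributed transform job.
fn default_transform_resources() -> TransformResources {
    TransformResources {
        instance_count: 1,
        instance_type: "ml.m4.xlarge".to_string(),
        volume_kms_key_id: None,
    }
}
```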

#[serde(rename = "InstanceType")] pub instance_type: String, ///

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:

  • // KMS Key ID

    "1234abcd-12ab-34cd-56ef-1234567890ab"

  • // Amazon Resource Name (ARN) of a KMS Key

    "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"

@@ -4362,7 +4452,7 @@ pub struct USD { ///

Provides configuration information for the worker UI for a labeling job.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct UiConfig { - ///

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

+ ///

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

#[serde(rename = "UiTemplateS3Uri")] pub ui_template_s3_uri: String, } @@ -4387,7 +4477,7 @@ pub struct UpdateCodeRepositoryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCodeRepositoryOutput { ///

The ARN of the Git repository.

#[serde(rename = "CodeRepositoryArn")] @@ -4405,7 +4495,7 @@ pub struct UpdateEndpointInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEndpointOutput { ///

The Amazon Resource Name (ARN) of the endpoint.

#[serde(rename = "EndpointArn")] @@ -4423,7 +4513,7 @@ pub struct UpdateEndpointWeightsAndCapacitiesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEndpointWeightsAndCapacitiesOutput { ///

The Amazon Resource Name (ARN) of the updated endpoint.

#[serde(rename = "EndpointArn")] @@ -4432,15 +4522,15 @@ pub struct UpdateEndpointWeightsAndCapacitiesOutput { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateNotebookInstanceInput { - ///

A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

+ ///

A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

#[serde(rename = "AcceleratorTypes")] #[serde(skip_serializing_if = "Option::is_none")] pub accelerator_types: Option>, - ///

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "AdditionalCodeRepositories")] #[serde(skip_serializing_if = "Option::is_none")] pub additional_code_repositories: Option>, - ///

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

+ ///

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

#[serde(rename = "DefaultCodeRepository")] #[serde(skip_serializing_if = "Option::is_none")] pub default_code_repository: Option, @@ -4479,7 +4569,7 @@ pub struct UpdateNotebookInstanceInput { #[serde(rename = "RootAccess")] #[serde(skip_serializing_if = "Option::is_none")] pub root_access: Option, - ///

The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB.

+ ///

The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so Amazon SageMaker can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size.

#[serde(rename = "VolumeSizeInGB")] #[serde(skip_serializing_if = "Option::is_none")] pub volume_size_in_gb: Option, @@ -4490,22 +4580,22 @@ pub struct UpdateNotebookInstanceLifecycleConfigInput { ///

The name of the lifecycle configuration.

#[serde(rename = "NotebookInstanceLifecycleConfigName")] pub notebook_instance_lifecycle_config_name: String, - ///

The shell script that runs only once, when you create a notebook instance

+ ///

The shell script that runs only once, when you create a notebook instance. The shell script must be a base64-encoded string.

#[serde(rename = "OnCreate")] #[serde(skip_serializing_if = "Option::is_none")] pub on_create: Option>, - ///

The shell script that runs every time you start a notebook instance, including when you create the notebook instance.

+ ///

The shell script that runs every time you start a notebook instance, including when you create the notebook instance. The shell script must be a base64-encoded string.
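A hedged sketch of wrapping a script in the hook type used by these fields; it assumes the third-party base64 crate for the required encoding:

```rust
use rusoto_sagemaker::NotebookInstanceLifecycleHook;

// Lifecycle scripts must be sent base64-encoded, per the docs above.
fn on_start_hook(script: &str) -> NotebookInstanceLifecycleHook {
    NotebookInstanceLifecycleHook {
        content: Some(base64::encode(script)),
    }
}
```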

#[serde(rename = "OnStart")] #[serde(skip_serializing_if = "Option::is_none")] pub on_start: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNotebookInstanceLifecycleConfigOutput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateNotebookInstanceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -4528,7 +4618,7 @@ pub struct UpdateWorkteamRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateWorkteamResponse { ///

A Workteam object that describes the updated work team.

#[serde(rename = "Workteam")] @@ -4548,7 +4638,7 @@ pub struct VpcConfig { ///

Provides details about a labeling work team.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Workteam { ///

The date and time that the work team was created (timestamp).

#[serde(rename = "CreateDate")] @@ -4564,6 +4654,7 @@ pub struct Workteam { ///

The Amazon Cognito user groups that make up the work team.

#[serde(rename = "MemberDefinitions")] pub member_definitions: Vec, + ///

Configures SNS notifications of available or expiring work items for work teams.

#[serde(rename = "NotificationConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub notification_configuration: Option, @@ -6825,7 +6916,7 @@ pub trait SageMaker { input: CreateAlgorithmInput, ) -> RusotoFuture; - ///

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

+ ///

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

fn create_code_repository( &self, input: CreateCodeRepositoryInput, @@ -6837,7 +6928,7 @@ pub trait SageMaker { input: CreateCompilationJobRequest, ) -> RusotoFuture; - ///

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS i an AWS Region in the AWS Identity and Access Management User Guide.

+ ///

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.
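
A sketch of the Creating → InService flow described above, assuming rusoto_sagemaker 0.41 field names and an endpoint config created beforehand via CreateEndpointConfig; the endpoint and config names are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{CreateEndpointInput, DescribeEndpointInput, SageMaker, SageMakerClient};
use std::{thread, time::Duration};

fn main() {
    let client = SageMakerClient::new(Region::UsWest2);
    // Kick off endpoint creation; SageMaker sets the status to "Creating".
    client
        .create_endpoint(CreateEndpointInput {
            endpoint_name: "my-endpoint".to_owned(),
            endpoint_config_name: "my-endpoint-config".to_owned(),
            ..Default::default()
        })
        .sync()
        .expect("create_endpoint failed");

    // Poll DescribeEndpoint until the status moves past "Creating".
    loop {
        let desc = client
            .describe_endpoint(DescribeEndpointInput {
                endpoint_name: "my-endpoint".to_owned(),
            })
            .sync()
            .expect("describe_endpoint failed");
        if desc.endpoint_status != "Creating" {
            println!("endpoint status: {}", desc.endpoint_status);
            break;
        }
        thread::sleep(Duration::from_secs(30));
    }
}
```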

fn create_endpoint( &self, input: CreateEndpointInput, @@ -6855,7 +6946,7 @@ pub trait SageMaker { input: CreateHyperParameterTuningJobRequest, ) -> RusotoFuture<CreateHyperParameterTuningJobResponse, CreateHyperParameterTuningJobError>; - ///

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

+ ///

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.
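
A hedged sketch of the request plumbing, assuming the rusoto_sagemaker 0.41 shapes; the bucket, role ARN, and job name are placeholders, and a real call must also populate the required HumanTaskConfig (workteam ARN, UI template, task counts), which is left at its default here:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{
    CreateLabelingJobRequest, LabelingJobDataSource, LabelingJobInputConfig,
    LabelingJobOutputConfig, LabelingJobS3DataSource, SageMaker, SageMakerClient,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let request = CreateLabelingJobRequest {
        labeling_job_name: "image-labels".to_owned(), // hypothetical
        label_attribute_name: "label".to_owned(),
        role_arn: "arn:aws:iam::123456789012:role/LabelingRole".to_owned(), // hypothetical
        input_config: LabelingJobInputConfig {
            data_source: LabelingJobDataSource {
                // The manifest file lists the S3 objects to label.
                s3_data_source: LabelingJobS3DataSource {
                    manifest_s3_uri: "s3://my-bucket/manifest.json".to_owned(),
                },
            },
            ..Default::default()
        },
        output_config: LabelingJobOutputConfig {
            s3_output_path: "s3://my-bucket/labels/".to_owned(),
            ..Default::default()
        },
        // HumanTaskConfig and the remaining optional fields are defaulted for brevity;
        // the service rejects an empty HumanTaskConfig, so fill it in for a real job.
        ..Default::default()
    };
    match client.create_labeling_job(request).sync() {
        Ok(out) => println!("labeling job ARN: {:?}", out.labeling_job_arn),
        Err(e) => eprintln!("create_labeling_job failed: {}", e),
    }
}
```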

fn create_labeling_job( &self, input: CreateLabelingJobRequest, @@ -6873,7 +6964,7 @@ pub trait SageMaker { input: CreateModelPackageInput, ) -> RusotoFuture<CreateModelPackageOutput, CreateModelPackageError>; - ///

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance that runs the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

+ ///

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance that runs the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.
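
The request maps onto the steps above roughly as in this minimal sketch, assuming rusoto_sagemaker 0.41 field names; the role ARN is hypothetical, and supplying subnet_id and security_group_ids would trigger the optional VPC step 2:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{CreateNotebookInstanceInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let input = CreateNotebookInstanceInput {
        notebook_instance_name: "analysis".to_owned(),
        instance_type: "ml.t2.medium".to_owned(),
        // Role that SageMaker assumes to access S3 and other resources (hypothetical ARN).
        role_arn: "arn:aws:iam::123456789012:role/NotebookRole".to_owned(),
        // subnet_id: Some(...) and security_group_ids: Some(vec![...]) would
        // additionally create a network interface in your own VPC (step 2 above).
        ..Default::default()
    };
    match client.create_notebook_instance(input).sync() {
        Ok(out) => println!("notebook ARN: {:?}", out.notebook_instance_arn),
        Err(e) => eprintln!("create_notebook_instance failed: {}", e),
    }
}
```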

fn create_notebook_instance( &self, input: CreateNotebookInstanceInput, @@ -6888,7 +6979,7 @@ pub trait SageMaker { CreateNotebookInstanceLifecycleConfigError, >; - ///

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

+ ///

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.
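
A short sketch of fetching the URL so it can be opened within the 5-minute window, assuming rusoto_sagemaker 0.41 field names; the instance name is hypothetical:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{CreatePresignedNotebookInstanceUrlInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let input = CreatePresignedNotebookInstanceUrlInput {
        notebook_instance_name: "analysis".to_owned(), // hypothetical instance
        // Optional lifetime of the authenticated session behind the URL;
        // the URL itself still expires 5 minutes after it is issued.
        session_expiration_duration_in_seconds: Some(1800),
    };
    match client.create_presigned_notebook_instance_url(input).sync() {
        Ok(out) => println!("open within 5 minutes: {:?}", out.authorized_url),
        Err(e) => eprintln!("request failed: {}", e),
    }
}
```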

fn create_presigned_notebook_instance_url( &self, input: CreatePresignedNotebookInstanceUrlInput, @@ -6897,7 +6988,7 @@ pub trait SageMaker { CreatePresignedNotebookInstanceUrlError, >; - ///

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.

+ ///

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

For more information about Amazon SageMaker, see How It Works.
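
Tying the bullets above together, a hedged sketch under the rusoto_sagemaker 0.41 shapes; the training image, bucket, and role ARN are placeholders:

```rust
use rusoto_core::Region;
use rusoto_sagemaker::{
    AlgorithmSpecification, CreateTrainingJobRequest, OutputDataConfig, ResourceConfig,
    SageMaker, SageMakerClient, StoppingCondition,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let request = CreateTrainingJobRequest {
        training_job_name: "kmeans-demo".to_owned(), // hypothetical
        role_arn: "arn:aws:iam::123456789012:role/TrainingRole".to_owned(), // hypothetical
        algorithm_specification: AlgorithmSpecification {
            // ECR image URI of the training algorithm (placeholder).
            training_image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/kmeans:1".to_owned()),
            training_input_mode: "File".to_owned(),
            ..Default::default()
        },
        output_data_config: OutputDataConfig {
            s3_output_path: "s3://my-bucket/artifacts/".to_owned(),
            ..Default::default()
        },
        resource_config: ResourceConfig {
            instance_count: 1,
            instance_type: "ml.m5.xlarge".to_owned(),
            volume_size_in_gb: 10,
            ..Default::default()
        },
        stopping_condition: StoppingCondition {
            // Caps billable training time, per the StoppingCondition bullet above.
            max_runtime_in_seconds: Some(3600),
            ..Default::default()
        },
        // InputDataConfig, managed spot settings, etc. are omitted here.
        ..Default::default()
    };
    match client.create_training_job(request).sync() {
        Ok(out) => println!("training job ARN: {:?}", out.training_job_arn),
        Err(e) => eprintln!("create_training_job failed: {}", e),
    }
}
```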

fn create_training_job( &self, input: CreateTrainingJobRequest, @@ -7284,10 +7375,7 @@ impl SageMakerClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SageMakerClient { - SageMakerClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7301,10 +7389,14 @@ impl SageMakerClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SageMakerClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SageMakerClient { + SageMakerClient { client, region } } } @@ -7363,7 +7455,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

+ ///

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

fn create_code_repository( &self, input: CreateCodeRepositoryInput, @@ -7419,7 +7511,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

+ ///

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

fn create_endpoint( &self, input: CreateEndpointInput, @@ -7503,7 +7595,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

+ ///

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

fn create_labeling_job( &self, input: CreateLabelingJobRequest, @@ -7590,7 +7682,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance that runs the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

+ ///

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance that runs the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

fn create_notebook_instance( &self, input: CreateNotebookInstanceInput, @@ -7652,7 +7744,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

+ ///

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

fn create_presigned_notebook_instance_url( &self, input: CreatePresignedNotebookInstanceUrlInput, @@ -7686,7 +7778,7 @@ impl SageMaker for SageMakerClient { }) } - ///

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.

+ ///

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

For more information about Amazon SageMaker, see How It Works.

fn create_training_job( &self, input: CreateTrainingJobRequest, diff --git a/rusoto/services/sdb/Cargo.toml b/rusoto/services/sdb/Cargo.toml index d1ccfc5969f..02e96deb78b 100644 --- a/rusoto/services/sdb/Cargo.toml +++ b/rusoto/services/sdb/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sdb" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sdb/README.md b/rusoto/services/sdb/README.md index 53c8bd84190..d10badff0f7 100644 --- a/rusoto/services/sdb/README.md +++ b/rusoto/services/sdb/README.md @@ -23,9 +23,16 @@ To use `rusoto_sdb` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_sdb = "0.40.0" +rusoto_sdb = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/sdb/src/custom/mod.rs b/rusoto/services/sdb/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/sdb/src/custom/mod.rs +++ b/rusoto/services/sdb/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/sdb/src/generated.rs b/rusoto/services/sdb/src/generated.rs index dd48e6a27e9..87bf9eff6c7 100644 --- a/rusoto/services/sdb/src/generated.rs +++ b/rusoto/services/sdb/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -1644,10 +1643,7 @@ impl SimpleDbClient { /// /// The client will use the default credentials provider and tls client. 
pub fn new(region: region::Region) -> SimpleDbClient { - SimpleDbClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1661,10 +1657,14 @@ impl SimpleDbClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SimpleDbClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SimpleDbClient { + SimpleDbClient { client, region } } } @@ -1832,7 +1832,7 @@ impl SimpleDb for SimpleDbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -1883,7 +1883,7 @@ impl SimpleDb for SimpleDbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -1934,7 +1934,7 @@ impl SimpleDb for SimpleDbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -2007,7 +2007,7 @@ impl SimpleDb for SimpleDbClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/secretsmanager/Cargo.toml b/rusoto/services/secretsmanager/Cargo.toml index c2755736817..aa8c3b41735 100644 --- a/rusoto/services/secretsmanager/Cargo.toml +++ b/rusoto/services/secretsmanager/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_secretsmanager" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/secretsmanager/README.md b/rusoto/services/secretsmanager/README.md index 4fc4fa35179..9aae21d5b99 100644 --- a/rusoto/services/secretsmanager/README.md +++ b/rusoto/services/secretsmanager/README.md @@ -23,9 +23,16 @@ To use `rusoto_secretsmanager` in your application, add it as a dependency in yo ```toml [dependencies] -rusoto_secretsmanager = "0.40.0" +rusoto_secretsmanager = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. 
+ +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/secretsmanager/src/custom/mod.rs b/rusoto/services/secretsmanager/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/secretsmanager/src/custom/mod.rs +++ b/rusoto/services/secretsmanager/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/secretsmanager/src/generated.rs b/rusoto/services/secretsmanager/src/generated.rs index 634b0a12e5d..c0f9773a7ec 100644 --- a/rusoto/services/secretsmanager/src/generated.rs +++ b/rusoto/services/secretsmanager/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -32,7 +31,7 @@ pub struct CancelRotateSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelRotateSecretResponse { ///

The ARN of the secret for which rotation was canceled.

#[serde(rename = "ARN")] @@ -85,7 +84,7 @@ pub struct CreateSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSecretResponse { ///

The Amazon Resource Name (ARN) of the secret that you just created.

Secrets Manager automatically adds several random characters to the name at the end of the ARN when you initially create a secret. This affects only the ARN and not the actual friendly name. This ensures that if you create a new secret with the same name as an old secret that you previously deleted, then users with access to the old secret don't automatically get access to the new secret because the ARNs are different.

#[serde(rename = "ARN")] @@ -109,7 +108,7 @@ pub struct DeleteResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResourcePolicyResponse { ///

The ARN of the secret that the resource-based policy was deleted for.

#[serde(rename = "ARN")] @@ -137,7 +136,7 @@ pub struct DeleteSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSecretResponse { ///

The ARN of the secret that is now scheduled for deletion.

#[serde(rename = "ARN")] @@ -161,7 +160,7 @@ pub struct DescribeSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSecretResponse { ///

The ARN of the secret.

#[serde(rename = "ARN")] @@ -195,6 +194,9 @@ pub struct DescribeSecretResponse { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, + #[serde(rename = "OwningService")] + #[serde(skip_serializing_if = "Option::is_none")] + pub owning_service: Option<String>, ///

Specifies whether automatic rotation is enabled for this secret.

To enable rotation, use RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. To disable rotation, use CancelRotateSecret.

#[serde(rename = "RotationEnabled")] #[serde(skip_serializing_if = "Option::is_none")] @@ -254,7 +256,7 @@ pub struct GetRandomPasswordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRandomPasswordResponse { ///

A string with the generated password.

#[serde(rename = "RandomPassword")] @@ -270,7 +272,7 @@ pub struct GetResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourcePolicyResponse { ///

The ARN of the secret that the resource-based policy was retrieved for.

#[serde(rename = "ARN")] @@ -302,7 +304,7 @@ pub struct GetSecretValueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSecretValueResponse { ///

The ARN of the secret.

#[serde(rename = "ARN")] @@ -359,7 +361,7 @@ pub struct ListSecretVersionIdsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecretVersionIdsResponse { ///

The Amazon Resource Name (ARN) for the secret.

Secrets Manager automatically adds several random characters to the name at the end of the ARN when you initially create a secret. This affects only the ARN and not the actual friendly name. This ensures that if you create a new secret with the same name as an old secret that you previously deleted, then users with access to the old secret don't automatically get access to the new secret because the ARNs are different.

#[serde(rename = "ARN")] @@ -392,7 +394,7 @@ pub struct ListSecretsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSecretsResponse { ///

If present in the response, this value indicates that there's more output available than what's included in the current response. This can occur even when the response includes no values at all, such as when you ask for a filtered view of a very long list. Use this value in the NextToken request parameter in a subsequent call to the operation to continue processing and get the next part of the output. You should repeat this until the NextToken response element comes back empty (as null).

#[serde(rename = "NextToken")] @@ -415,7 +417,7 @@ pub struct PutResourcePolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutResourcePolicyResponse { ///

The ARN of the secret that the resource-based policy was retrieved for.

#[serde(rename = "ARN")] @@ -456,7 +458,7 @@ pub struct PutSecretValueRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutSecretValueResponse { ///

The Amazon Resource Name (ARN) for the secret for which you just created a version.

#[serde(rename = "ARN")] @@ -484,7 +486,7 @@ pub struct RestoreSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreSecretResponse { ///

The ARN of the secret that was restored.

#[serde(rename = "ARN")] @@ -516,7 +518,7 @@ pub struct RotateSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RotateSecretResponse { ///

The ARN of the secret.

#[serde(rename = "ARN")] @@ -543,7 +545,7 @@ pub struct RotationRulesType { ///

A structure that contains the details about a secret. It does not include the encrypted SecretString and SecretBinary values. To get those values, use the GetSecretValue operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecretListEntry { ///

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

#[serde(rename = "ARN")] @@ -577,6 +579,9 @@ pub struct SecretListEntry { #[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, + #[serde(rename = "OwningService")] + #[serde(skip_serializing_if = "Option::is_none")] + pub owning_service: Option<String>, ///

Indicates whether automatic, scheduled rotation is enabled for this secret.

#[serde(rename = "RotationEnabled")] #[serde(skip_serializing_if = "Option::is_none")] @@ -601,7 +606,7 @@ pub struct SecretListEntry { ///

A structure that contains information about one version of a secret.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SecretVersionsListEntry { ///

The date and time this version of the secret was created.

#[serde(rename = "CreatedDate")] @@ -687,7 +692,7 @@ pub struct UpdateSecretRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSecretResponse { ///

The ARN of the secret that was updated.

Secrets Manager automatically adds several random characters to the name at the end of the ARN when you initially create a secret. This affects only the ARN and not the actual friendly name. This ensures that if you create a new secret with the same name as an old secret that you previously deleted, then users with access to the old secret don't automatically get access to the new secret because the ARNs are different.

#[serde(rename = "ARN")] @@ -722,7 +727,7 @@ pub struct UpdateSecretVersionStageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSecretVersionStageResponse { ///

The ARN of the secret with the staging label that was modified.

#[serde(rename = "ARN")] @@ -1856,10 +1861,7 @@ impl SecretsManagerClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SecretsManagerClient { - SecretsManagerClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1873,10 +1875,14 @@ impl SecretsManagerClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SecretsManagerClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SecretsManagerClient { + SecretsManagerClient { client, region } } } diff --git a/rusoto/services/securityhub/Cargo.toml b/rusoto/services/securityhub/Cargo.toml new file mode 100644 index 00000000000..0fd8bbe2d9c --- /dev/null +++ b/rusoto/services/securityhub/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - AWS SecurityHub @ 2018-10-26" +documentation = "https://docs.rs/rusoto_securityhub" +keywords = ["AWS", "Amazon", "securityhub"] +license = "MIT" +name = "rusoto_securityhub" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/securityhub/README.md b/rusoto/services/securityhub/README.md new file mode 100644 index 00000000000..18fe8dc8a26 --- /dev/null +++ b/rusoto/services/securityhub/README.md @@ -0,0 +1,52 @@ + +# Rusoto SecurityHub +Rust SDK for AWS SecurityHub + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_securityhub` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_securityhub = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. + +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. 
+ +[api-documentation]: https://docs.rs/rusoto_securityhub "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/securityhub/src/custom/mod.rs b/rusoto/services/securityhub/src/custom/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rusoto/services/securityhub/src/generated.rs b/rusoto/services/securityhub/src/generated.rs new file mode 100644 index 00000000000..49cfdcb3150 --- /dev/null +++ b/rusoto/services/securityhub/src/generated.rs @@ -0,0 +1,5407 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::param::{Params, ServiceParams}; +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct AcceptInvitationRequest { + ///

The ID of the invitation sent from the Security Hub master account.

+ #[serde(rename = "InvitationId")] + pub invitation_id: String, + ///

The account ID of the Security Hub master account that sent the invitation.

+ #[serde(rename = "MasterId")] + pub master_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct AcceptInvitationResponse {} + +///

The details of an AWS account.

+#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct AccountDetails { + ///

The ID of an AWS account.

+ #[serde(rename = "AccountId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub account_id: Option<String>, + ///

The email of an AWS account.

+ #[serde(rename = "Email")] + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option<String>, +} + +///

An ActionTarget object.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ActionTarget { + ///

The ARN for the target action.

+ #[serde(rename = "ActionTargetArn")] + pub action_target_arn: String, + ///

The description of the target action.

+ #[serde(rename = "Description")] + pub description: String, + ///

The name of the action target.

+ #[serde(rename = "Name")] + pub name: String, +} + +///

The details of an Amazon EC2 instance.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AwsEc2InstanceDetails { + ///

The IAM profile ARN of the instance.

+ #[serde(rename = "IamInstanceProfileArn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub iam_instance_profile_arn: Option<String>, + ///

The Amazon Machine Image (AMI) ID of the instance.

+ #[serde(rename = "ImageId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_id: Option<String>, + ///

The IPv4 addresses associated with the instance.

+ #[serde(rename = "IpV4Addresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ip_v4_addresses: Option<Vec<String>>, + ///

The IPv6 addresses associated with the instance.

+ #[serde(rename = "IpV6Addresses")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ip_v6_addresses: Option<Vec<String>>, + ///

The key name associated with the instance.

+ #[serde(rename = "KeyName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub key_name: Option<String>, + ///

The date/time the instance was launched.

+ #[serde(rename = "LaunchedAt")] + #[serde(skip_serializing_if = "Option::is_none")] + pub launched_at: Option, + ///

The identifier of the subnet that the instance was launched in.

+ #[serde(rename = "SubnetId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub subnet_id: Option, + ///

The instance type of the instance.

+ #[serde(rename = "Type")] + #[serde(skip_serializing_if = "Option::is_none")] + pub type_: Option, + ///

The identifier of the VPC that the instance was launched in.

+ #[serde(rename = "VpcId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub vpc_id: Option, +} + +///
+/// IAM access key details related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsIamAccessKeyDetails {
+    /// The creation date/time of the IAM access key related to a finding.
+    #[serde(rename = "CreatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub created_at: Option<String>,
+    /// The status of the IAM access key related to a finding.
+    #[serde(rename = "Status")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status: Option<String>,
+    /// The user associated with the IAM access key related to a finding.
+    #[serde(rename = "UserName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub user_name: Option<String>,
+}
+
+/// The details of an Amazon S3 bucket.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsS3BucketDetails {
+    /// The canonical user ID of the owner of the S3 bucket.
+    #[serde(rename = "OwnerId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub owner_id: Option<String>,
+    /// The display name of the owner of the S3 bucket.
+    #[serde(rename = "OwnerName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub owner_name: Option<String>,
+}
+
+/// Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between AWS security services and third-party solutions, and compliance checks.
+/// A finding is a potential security issue generated either by AWS services (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated third-party solutions and compliance checks.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsSecurityFinding {
+    /// The AWS account ID that a finding is generated in.
+    #[serde(rename = "AwsAccountId")]
+    pub aws_account_id: String,
+    /// This data type is exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, CIS AWS Foundations). Contains compliance-related finding details.
+    #[serde(rename = "Compliance")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub compliance: Option<Compliance>,
+    /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.
+    #[serde(rename = "Confidence")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub confidence: Option<i64>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider created the potential security issue that a finding captured.
+    #[serde(rename = "CreatedAt")]
+    pub created_at: String,
+    /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.
+    #[serde(rename = "Criticality")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub criticality: Option<i64>,
+    /// A finding's description.
+    /// In this release, Description is a required property.
+    #[serde(rename = "Description")]
+    pub description: String,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider first observed the potential security issue that a finding captured.
+    #[serde(rename = "FirstObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub first_observed_at: Option<String>,
+    /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.
+    #[serde(rename = "GeneratorId")]
+    pub generator_id: String,
+    /// The security findings provider-specific identifier for a finding.
+    #[serde(rename = "Id")]
+    pub id: String,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider most recently observed the potential security issue that a finding captured.
+    #[serde(rename = "LastObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_observed_at: Option<String>,
+    /// A list of malware related to a finding.
+    #[serde(rename = "Malware")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub malware: Option<Vec<Malware>>,
+    /// The details of network-related information about a finding.
+    #[serde(rename = "Network")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network: Option<Network>,
+    /// A user-defined note added to a finding.
+    #[serde(rename = "Note")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub note: Option<Note>,
+    /// The details of process-related information about a finding.
+    #[serde(rename = "Process")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process: Option<ProcessDetails>,
+    /// The ARN generated by Security Hub that uniquely identifies a third-party company (security-findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.
+    #[serde(rename = "ProductArn")]
+    pub product_arn: String,
+    /// A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format.
+    #[serde(rename = "ProductFields")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_fields: Option<::std::collections::HashMap<String, String>>,
+    /// The record state of a finding.
+    #[serde(rename = "RecordState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub record_state: Option<String>,
+    /// A list of related findings.
+    #[serde(rename = "RelatedFindings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub related_findings: Option<Vec<RelatedFinding>>,
+    /// A data type that describes the remediation options for a finding.
+    #[serde(rename = "Remediation")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub remediation: Option<Remediation>,
+    /// A set of resource data types that describe the resources that the finding refers to.
+    #[serde(rename = "Resources")]
+    pub resources: Vec<Resource>,
+    /// The schema version that a finding is formatted for.
+    #[serde(rename = "SchemaVersion")]
+    pub schema_version: String,
+    /// A finding's severity.
+    #[serde(rename = "Severity")]
+    pub severity: Severity,
+    /// A URL that links to a page about the current finding in the security-findings provider's solution.
+    #[serde(rename = "SourceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_url: Option<String>,
+    /// Threat intel details related to a finding.
+    #[serde(rename = "ThreatIntelIndicators")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicators: Option<Vec<ThreatIntelIndicator>>,
+    /// A finding's title.
+    /// In this release, Title is a required property.
+    #[serde(rename = "Title")]
+    pub title: String,
+    /// One or more finding types in the format of namespace/category/classifier that classify a finding.
+    /// Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications
+    #[serde(rename = "Types")]
+    pub types: Vec<String>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider last updated the finding record.
+    #[serde(rename = "UpdatedAt")]
+    pub updated_at: String,
+    /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.
+    #[serde(rename = "UserDefinedFields")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub user_defined_fields: Option<::std::collections::HashMap<String, String>>,
+    /// Indicates the veracity of a finding.
+    #[serde(rename = "VerificationState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub verification_state: Option<String>,
+    /// The workflow state of a finding.
+    #[serde(rename = "WorkflowState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow_state: Option<String>,
+}
+
+/// A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AwsSecurityFindingFilters {
+    /// The AWS account ID that a finding is generated in.
+    #[serde(rename = "AwsAccountId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub aws_account_id: Option<Vec<StringFilter>>,
+    /// The name of the findings provider (company) that owns the solution (product) that generates findings.
+    #[serde(rename = "CompanyName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub company_name: Option<Vec<StringFilter>>,
+    /// Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, CIS AWS Foundations). Contains compliance-related finding details.
+    #[serde(rename = "ComplianceStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub compliance_status: Option<Vec<StringFilter>>,
+    /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.
+    #[serde(rename = "Confidence")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub confidence: Option<Vec<NumberFilter>>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider captured the potential security issue that a finding captured.
+    #[serde(rename = "CreatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub created_at: Option<Vec<DateFilter>>,
+    /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.
+    #[serde(rename = "Criticality")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub criticality: Option<Vec<NumberFilter>>,
+    /// A finding's description.
+    #[serde(rename = "Description")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<Vec<StringFilter>>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider first observed the potential security issue that a finding captured.
+    #[serde(rename = "FirstObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub first_observed_at: Option<Vec<DateFilter>>,
+    /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.
+    #[serde(rename = "GeneratorId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub generator_id: Option<Vec<StringFilter>>,
+    /// The security findings provider-specific identifier for a finding.
+    #[serde(rename = "Id")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub id: Option<Vec<StringFilter>>,
+    /// A keyword for a finding.
+    #[serde(rename = "Keyword")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub keyword: Option<Vec<KeywordFilter>>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider most recently observed the potential security issue that a finding captured.
+    #[serde(rename = "LastObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_observed_at: Option<Vec<DateFilter>>,
+    /// The name of the malware that was observed.
+    #[serde(rename = "MalwareName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub malware_name: Option<Vec<StringFilter>>,
+    /// The filesystem path of the malware that was observed.
+    #[serde(rename = "MalwarePath")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub malware_path: Option<Vec<StringFilter>>,
+    /// The state of the malware that was observed.
+    #[serde(rename = "MalwareState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub malware_state: Option<Vec<StringFilter>>,
+    /// The type of the malware that was observed.
+    #[serde(rename = "MalwareType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub malware_type: Option<Vec<StringFilter>>,
+    /// The destination domain of network-related information about a finding.
+    #[serde(rename = "NetworkDestinationDomain")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_destination_domain: Option<Vec<StringFilter>>,
+    /// The destination IPv4 address of network-related information about a finding.
+    #[serde(rename = "NetworkDestinationIpV4")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_destination_ip_v4: Option<Vec<IpFilter>>,
+    /// The destination IPv6 address of network-related information about a finding.
+    #[serde(rename = "NetworkDestinationIpV6")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_destination_ip_v6: Option<Vec<IpFilter>>,
+    /// The destination port of network-related information about a finding.
+    #[serde(rename = "NetworkDestinationPort")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_destination_port: Option<Vec<NumberFilter>>,
+    /// Indicates the direction of network traffic associated with a finding.
+    #[serde(rename = "NetworkDirection")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_direction: Option<Vec<StringFilter>>,
+    /// The protocol of network-related information about a finding.
+    #[serde(rename = "NetworkProtocol")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_protocol: Option<Vec<StringFilter>>,
+    /// The source domain of network-related information about a finding.
+    #[serde(rename = "NetworkSourceDomain")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_source_domain: Option<Vec<StringFilter>>,
+    /// The source IPv4 address of network-related information about a finding.
+    #[serde(rename = "NetworkSourceIpV4")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_source_ip_v4: Option<Vec<IpFilter>>,
+    /// The source IPv6 address of network-related information about a finding.
+    #[serde(rename = "NetworkSourceIpV6")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_source_ip_v6: Option<Vec<IpFilter>>,
+    /// The source media access control (MAC) address of network-related information about a finding.
+    #[serde(rename = "NetworkSourceMac")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_source_mac: Option<Vec<StringFilter>>,
+    /// The source port of network-related information about a finding.
+    #[serde(rename = "NetworkSourcePort")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_source_port: Option<Vec<NumberFilter>>,
+    /// The text of a note.
+    #[serde(rename = "NoteText")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub note_text: Option<Vec<StringFilter>>,
+    /// The timestamp of when the note was updated.
+    #[serde(rename = "NoteUpdatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub note_updated_at: Option<Vec<DateFilter>>,
+    /// The principal that created a note.
+    #[serde(rename = "NoteUpdatedBy")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub note_updated_by: Option<Vec<StringFilter>>,
+    /// The date/time that the process was launched.
+    #[serde(rename = "ProcessLaunchedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_launched_at: Option<Vec<DateFilter>>,
+    /// The name of the process.
+    #[serde(rename = "ProcessName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_name: Option<Vec<StringFilter>>,
+    /// The parent process ID.
+    #[serde(rename = "ProcessParentPid")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_parent_pid: Option<Vec<NumberFilter>>,
+    /// The path to the process executable.
+    #[serde(rename = "ProcessPath")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_path: Option<Vec<StringFilter>>,
+    /// The process ID.
+    #[serde(rename = "ProcessPid")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_pid: Option<Vec<NumberFilter>>,
+    /// The date/time that the process was terminated.
+    #[serde(rename = "ProcessTerminatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub process_terminated_at: Option<Vec<DateFilter>>,
+    /// The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.
+    #[serde(rename = "ProductArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_arn: Option<Vec<StringFilter>>,
+    /// A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format.
+    #[serde(rename = "ProductFields")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_fields: Option<Vec<MapFilter>>,
+    /// The name of the solution (product) that generates findings.
+    #[serde(rename = "ProductName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_name: Option<Vec<StringFilter>>,
+    /// The recommendation of what to do about the issue described in a finding.
+    #[serde(rename = "RecommendationText")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub recommendation_text: Option<Vec<StringFilter>>,
+    /// The updated record state for the finding.
+    #[serde(rename = "RecordState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub record_state: Option<Vec<StringFilter>>,
+    /// The solution-generated identifier for a related finding.
+    #[serde(rename = "RelatedFindingsId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub related_findings_id: Option<Vec<StringFilter>>,
+    /// The ARN of the solution that generated a related finding.
+    #[serde(rename = "RelatedFindingsProductArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub related_findings_product_arn: Option<Vec<StringFilter>>,
+    /// The IAM profile ARN of the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceIamInstanceProfileArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_iam_instance_profile_arn: Option<Vec<StringFilter>>,
+    /// The Amazon Machine Image (AMI) ID of the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceImageId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_image_id: Option<Vec<StringFilter>>,
+    /// The IPv4 addresses associated with the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceIpV4Addresses")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_ip_v4_addresses: Option<Vec<IpFilter>>,
+    /// The IPv6 addresses associated with the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceIpV6Addresses")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_ip_v6_addresses: Option<Vec<IpFilter>>,
+    /// The key name associated with the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceKeyName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_key_name: Option<Vec<StringFilter>>,
+    /// The date/time the instance was launched.
+    #[serde(rename = "ResourceAwsEc2InstanceLaunchedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_launched_at: Option<Vec<DateFilter>>,
+    /// The identifier of the subnet that the instance was launched in.
+    #[serde(rename = "ResourceAwsEc2InstanceSubnetId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_subnet_id: Option<Vec<StringFilter>>,
+    /// The instance type of the instance.
+    #[serde(rename = "ResourceAwsEc2InstanceType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_type: Option<Vec<StringFilter>>,
+    /// The identifier of the VPC that the instance was launched in.
+    #[serde(rename = "ResourceAwsEc2InstanceVpcId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_ec_2_instance_vpc_id: Option<Vec<StringFilter>>,
+    /// The creation date/time of the IAM access key related to a finding.
+    #[serde(rename = "ResourceAwsIamAccessKeyCreatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_iam_access_key_created_at: Option<Vec<DateFilter>>,
+    /// The status of the IAM access key related to a finding.
+    #[serde(rename = "ResourceAwsIamAccessKeyStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_iam_access_key_status: Option<Vec<StringFilter>>,
+    /// The user associated with the IAM access key related to a finding.
+    #[serde(rename = "ResourceAwsIamAccessKeyUserName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_iam_access_key_user_name: Option<Vec<StringFilter>>,
+    /// The canonical user ID of the owner of the S3 bucket.
+    #[serde(rename = "ResourceAwsS3BucketOwnerId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_s3_bucket_owner_id: Option<Vec<StringFilter>>,
+    /// The display name of the owner of the S3 bucket.
+    #[serde(rename = "ResourceAwsS3BucketOwnerName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_aws_s3_bucket_owner_name: Option<Vec<StringFilter>>,
+    /// The identifier of the image related to a finding.
+    #[serde(rename = "ResourceContainerImageId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_container_image_id: Option<Vec<StringFilter>>,
+    /// The name of the image related to a finding.
+    #[serde(rename = "ResourceContainerImageName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_container_image_name: Option<Vec<StringFilter>>,
+    /// The date/time that the container was started.
+    #[serde(rename = "ResourceContainerLaunchedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_container_launched_at: Option<Vec<DateFilter>>,
+    /// The name of the container related to a finding.
+    #[serde(rename = "ResourceContainerName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_container_name: Option<Vec<StringFilter>>,
+    /// The details of a resource that doesn't have a specific subfield for the resource type defined.
+    #[serde(rename = "ResourceDetailsOther")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_details_other: Option<Vec<MapFilter>>,
+    /// The canonical identifier for the given resource type.
+    #[serde(rename = "ResourceId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_id: Option<Vec<StringFilter>>,
+    /// The canonical AWS partition name that the Region is assigned to.
+    #[serde(rename = "ResourcePartition")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_partition: Option<Vec<StringFilter>>,
+    /// The canonical AWS external Region name where this resource is located.
+    #[serde(rename = "ResourceRegion")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_region: Option<Vec<StringFilter>>,
+    /// A list of AWS tags associated with a resource at the time the finding was processed.
+    #[serde(rename = "ResourceTags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_tags: Option<Vec<MapFilter>>,
+    /// Specifies the type of the resource that details are provided for.
+    #[serde(rename = "ResourceType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_type: Option<Vec<StringFilter>>,
+    /// The label of a finding's severity.
+    #[serde(rename = "SeverityLabel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub severity_label: Option<Vec<StringFilter>>,
+    /// The normalized severity of a finding.
+    #[serde(rename = "SeverityNormalized")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub severity_normalized: Option<Vec<NumberFilter>>,
+    /// The native severity as defined by the security-findings provider's solution that generated the finding.
+    #[serde(rename = "SeverityProduct")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub severity_product: Option<Vec<NumberFilter>>,
+    /// A URL that links to a page about the current finding in the security-findings provider's solution.
+    #[serde(rename = "SourceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_url: Option<Vec<StringFilter>>,
+    /// The category of a threat intel indicator.
+    #[serde(rename = "ThreatIntelIndicatorCategory")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_category: Option<Vec<StringFilter>>,
+    /// The date/time of the last observation of a threat intel indicator.
+    #[serde(rename = "ThreatIntelIndicatorLastObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_last_observed_at: Option<Vec<DateFilter>>,
+    /// The source of the threat intel.
+    #[serde(rename = "ThreatIntelIndicatorSource")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_source: Option<Vec<StringFilter>>,
+    /// The URL for more details from the source of the threat intel.
+    #[serde(rename = "ThreatIntelIndicatorSourceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_source_url: Option<Vec<StringFilter>>,
+    /// The type of a threat intel indicator.
+    #[serde(rename = "ThreatIntelIndicatorType")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_type: Option<Vec<StringFilter>>,
+    /// The value of a threat intel indicator.
+    #[serde(rename = "ThreatIntelIndicatorValue")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threat_intel_indicator_value: Option<Vec<StringFilter>>,
+    /// A finding's title.
+    #[serde(rename = "Title")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub title: Option<Vec<StringFilter>>,
+    /// A finding type in the format of namespace/category/classifier that classifies a finding.
+    #[serde(rename = "Type")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub type_: Option<Vec<StringFilter>>,
+    /// An ISO8601-formatted timestamp that indicates when the security-findings provider last updated the finding record.
+    #[serde(rename = "UpdatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub updated_at: Option<Vec<DateFilter>>,
+    /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.
+    #[serde(rename = "UserDefinedFields")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub user_defined_fields: Option<Vec<MapFilter>>,
+    /// The veracity of a finding.
+    #[serde(rename = "VerificationState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub verification_state: Option<Vec<StringFilter>>,
+    /// The workflow state of a finding.
+    #[serde(rename = "WorkflowState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow_state: Option<Vec<StringFilter>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct BatchDisableStandardsRequest {
+    /// The ARNs of the standards subscriptions to disable.
+    #[serde(rename = "StandardsSubscriptionArns")]
+    pub standards_subscription_arns: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct BatchDisableStandardsResponse {
+    /// The details of the standards subscriptions that were disabled.
+    #[serde(rename = "StandardsSubscriptions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub standards_subscriptions: Option<Vec<StandardsSubscription>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct BatchEnableStandardsRequest {
+    /// The list of standards compliance checks to enable.
+    /// In this release, Security Hub supports only the CIS AWS Foundations standard.
+    /// The ARN for the standard is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0.
+    #[serde(rename = "StandardsSubscriptionRequests")]
+    pub standards_subscription_requests: Vec<StandardsSubscriptionRequest>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct BatchEnableStandardsResponse {
+    /// The details of the standards subscriptions that were enabled.
+    #[serde(rename = "StandardsSubscriptions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub standards_subscriptions: Option<Vec<StandardsSubscription>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct BatchImportFindingsRequest {
+    /// A list of findings to import. To successfully import a finding, it must follow the AWS Security Finding Format.
+    #[serde(rename = "Findings")]
+    pub findings: Vec<AwsSecurityFinding>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct BatchImportFindingsResponse {
+    /// The number of findings that failed to import.
+    #[serde(rename = "FailedCount")]
+    pub failed_count: i64,
+    /// The list of the findings that failed to import.
+    #[serde(rename = "FailedFindings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failed_findings: Option<Vec<ImportFindingsError>>,
+    /// The number of findings that were successfully imported.
+    #[serde(rename = "SuccessCount")]
+    pub success_count: i64,
+}
+
+/// Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, CIS AWS Foundations). Contains compliance-related finding details.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Compliance {
+    /// The result of a compliance check.
+    #[serde(rename = "Status")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status: Option<String>,
+}
+
+/// Container details related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ContainerDetails {
+    /// The identifier of the image related to a finding.
+    #[serde(rename = "ImageId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub image_id: Option<String>,
+    /// The name of the image related to a finding.
+    #[serde(rename = "ImageName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub image_name: Option<String>,
+    /// The date and time when the container started.
+    #[serde(rename = "LaunchedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub launched_at: Option<String>,
+    /// The name of the container related to a finding.
+    #[serde(rename = "Name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateActionTargetRequest {
+    /// The description for the custom action target.
+    #[serde(rename = "Description")]
+    pub description: String,
+    /// The ID for the custom action target.
+    #[serde(rename = "Id")]
+    pub id: String,
+    /// The name of the custom action target.
+    #[serde(rename = "Name")]
+    pub name: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateActionTargetResponse {
+    /// The ARN for the custom action target.
+    #[serde(rename = "ActionTargetArn")]
+    pub action_target_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateInsightRequest {
+    /// One or more attributes used to filter the findings included in the insight. Only findings that match the criteria defined in the filters are included in the insight.
+    #[serde(rename = "Filters")]
+    pub filters: AwsSecurityFindingFilters,
+    /// The attribute used as the aggregator to group related findings for the insight.
+    #[serde(rename = "GroupByAttribute")]
+    pub group_by_attribute: String,
+    /// The name of the custom insight to create.
+    #[serde(rename = "Name")]
+    pub name: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateInsightResponse {
+    /// The ARN of the insight created.
+    #[serde(rename = "InsightArn")]
+    pub insight_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct CreateMembersRequest {
+    /// A list of account ID and email address pairs of the accounts to associate with the Security Hub master account.
+    #[serde(rename = "AccountDetails")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_details: Option<Vec<AccountDetails>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct CreateMembersResponse {
+    /// A list of account ID and email address pairs of the AWS accounts that weren't processed.
+    #[serde(rename = "UnprocessedAccounts")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unprocessed_accounts: Option<Vec<Result>>,
+}
+
+/// A date filter for querying findings.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct DateFilter {
+    /// A date range for the date filter.
+    #[serde(rename = "DateRange")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub date_range: Option<DateRange>,
+    /// An end date for the date filter.
+    #[serde(rename = "End")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end: Option<String>,
+    /// A start date for the date filter.
+    #[serde(rename = "Start")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start: Option<String>,
+}
+
+/// A date range for the date filter.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct DateRange {
+    /// A date range unit for the date filter.
+    #[serde(rename = "Unit")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unit: Option<String>,
+    /// A date range value for the date filter.
+    #[serde(rename = "Value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<i64>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeclineInvitationsRequest {
+    /// A list of account IDs that specify the accounts that invitations to Security Hub are declined from.
+    #[serde(rename = "AccountIds")]
+    pub account_ids: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeclineInvitationsResponse {
+    /// A list of account ID and email address pairs of the AWS accounts that weren't processed.
+    #[serde(rename = "UnprocessedAccounts")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unprocessed_accounts: Option<Vec<Result>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteActionTargetRequest {
+    /// The ARN of the custom action target to delete.
+    #[serde(rename = "ActionTargetArn")]
+    pub action_target_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteActionTargetResponse {
+    /// The ARN of the custom action target that was deleted.
+    #[serde(rename = "ActionTargetArn")]
+    pub action_target_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteInsightRequest {
+    /// The ARN of the insight to delete.
+    #[serde(rename = "InsightArn")]
+    pub insight_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteInsightResponse {
+    /// The ARN of the insight that was deleted.
+    #[serde(rename = "InsightArn")]
+    pub insight_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteInvitationsRequest {
+    /// A list of the account IDs that sent the invitations to delete.
+    #[serde(rename = "AccountIds")]
+    pub account_ids: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteInvitationsResponse {
+    /// A list of account ID and email address pairs of the AWS accounts that invitations weren't deleted for.
+    #[serde(rename = "UnprocessedAccounts")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unprocessed_accounts: Option<Vec<Result>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DeleteMembersRequest {
+    /// A list of account IDs of the member accounts to delete.
+    #[serde(rename = "AccountIds")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_ids: Option<Vec<String>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DeleteMembersResponse {
+    /// A list of account ID and email address pairs of the AWS accounts that weren't deleted.
+    #[serde(rename = "UnprocessedAccounts")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub unprocessed_accounts: Option<Vec<Result>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DescribeActionTargetsRequest {
+    /// A list of custom action target ARNs for the custom action targets to retrieve.
+    #[serde(rename = "ActionTargetArns")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub action_target_arns: Option<Vec<String>>,
+    /// The maximum number of results to return.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DescribeActionTargetsResponse {
+    /// A list of ActionTarget objects. Each object includes the ActionTargetArn, Description, and Name of a custom action target available in Security Hub.
+    #[serde(rename = "ActionTargets")]
+    pub action_targets: Vec<ActionTarget>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DescribeHubRequest {
+    /// The ARN of the Hub resource to retrieve.
+    #[serde(rename = "HubArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub hub_arn: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DescribeHubResponse {
+    /// The ARN of the Hub resource retrieved.
+    #[serde(rename = "HubArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub hub_arn: Option<String>,
+    /// The date and time when Security Hub was enabled in the account.
+    #[serde(rename = "SubscribedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub subscribed_at: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DescribeProductsRequest {
+    /// The maximum number of results to return.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DescribeProductsResponse {
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// A list of products, including details for each product.
+    #[serde(rename = "Products")]
+    pub products: Vec<Product>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DisableImportFindingsForProductRequest {
+    /// The ARN of the integrated product to disable the integration for.
+    #[serde(rename = "ProductSubscriptionArn")]
+    pub product_subscription_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DisableImportFindingsForProductResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DisableSecurityHubRequest {}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DisableSecurityHubResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DisassociateFromMasterAccountRequest {}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DisassociateFromMasterAccountResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DisassociateMembersRequest {
+    /// The account IDs of the member accounts to disassociate from the master account.
+    #[serde(rename = "AccountIds")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_ids: Option<Vec<String>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DisassociateMembersResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct EnableImportFindingsForProductRequest {
+    /// The ARN of the product to enable the integration for.
+    #[serde(rename = "ProductArn")]
+    pub product_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct EnableImportFindingsForProductResponse {
+    /// The ARN of your subscription to the product to enable integrations for.
+    #[serde(rename = "ProductSubscriptionArn")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_subscription_arn: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct EnableSecurityHubRequest {
+    /// The tags to add to the Hub resource when you enable Security Hub.
+    #[serde(rename = "Tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<::std::collections::HashMap<String, String>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct EnableSecurityHubResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetEnabledStandardsRequest {
+    /// The maximum number of results to return in the response.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// Paginates results. On your first call to the GetEnabledStandards operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// A list of the standards subscription ARNs for the standards to retrieve.
+    #[serde(rename = "StandardsSubscriptionArns")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub standards_subscription_arns: Option<Vec<String>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetEnabledStandardsResponse {
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// A list of StandardsSubscriptions objects that include information about the enabled standards.
+    #[serde(rename = "StandardsSubscriptions")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub standards_subscriptions: Option<Vec<StandardsSubscription>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetFindingsRequest {
+    /// The findings attributes used to define a condition to filter the findings returned.
+    #[serde(rename = "Filters")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub filters: Option<AwsSecurityFindingFilters>,
+    /// The maximum number of findings to return.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// Paginates results. On your first call to the GetFindings operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// Findings attributes used to sort the list of findings returned.
+    #[serde(rename = "SortCriteria")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sort_criteria: Option<Vec<SortCriterion>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetFindingsResponse {
+    /// The findings that matched the filters specified in the request.
+    #[serde(rename = "Findings")]
+    pub findings: Vec<AwsSecurityFinding>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}

The ARN of the insight whose results you want to see.

+ #[serde(rename = "InsightArn")] + pub insight_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetInsightResultsResponse { + ///

The insight results returned by the operation.

+ #[serde(rename = "InsightResults")] + pub insight_results: InsightResults, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetInsightsRequest { + ///

The ARNs of the insights that you want to describe.

+ #[serde(rename = "InsightArns")] + #[serde(skip_serializing_if = "Option::is_none")] + pub insight_arns: Option>, + ///

The maximum number of items that you want in the response.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

Paginates results. On your first call to the GetInsights operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetInsightsResponse { + ///

The insights returned by the operation.

+ #[serde(rename = "Insights")] + pub insights: Vec, + ///

The token that is required for pagination.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetInvitationsCountRequest {} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetInvitationsCountResponse { + ///

The number of all membership invitations sent to this Security Hub member account, not including the currently accepted invitation.

+ #[serde(rename = "InvitationsCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invitations_count: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMasterAccountRequest {} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMasterAccountResponse { + ///

A list of details about the Security Hub master account for the current member account.

+ #[serde(rename = "Master")] + #[serde(skip_serializing_if = "Option::is_none")] + pub master: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct GetMembersRequest { + ///

A list of account IDs for the Security Hub member accounts that you want to return the details for.

+ #[serde(rename = "AccountIds")] + pub account_ids: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct GetMembersResponse { + ///

A list of details about the Security Hub member accounts.

+ #[serde(rename = "Members")] + #[serde(skip_serializing_if = "Option::is_none")] + pub members: Option>, + ///

A list of account ID and email address pairs of the AWS accounts that couldn't be processed.

+ #[serde(rename = "UnprocessedAccounts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub unprocessed_accounts: Option>, +} + +///

Includes details of the list of the findings that can't be imported.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ImportFindingsError { + ///

The code of the error made during the BatchImportFindings operation.

+ #[serde(rename = "ErrorCode")] + pub error_code: String, + ///

The message of the error made during the BatchImportFindings operation.

+ #[serde(rename = "ErrorMessage")] + pub error_message: String, + ///

The ID of the error made during the BatchImportFindings operation.

+ #[serde(rename = "Id")] + pub id: String, +} + +///

Contains information about a Security Hub insight.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Insight { + ///

One or more attributes used to filter the findings included in the insight. Only findings that match the criteria defined in the filters are included in the insight.

+ #[serde(rename = "Filters")] + pub filters: AwsSecurityFindingFilters, + ///

The attribute that the insight's findings are grouped by. This attribute is used as a findings aggregator for the purposes of viewing and managing multiple related findings under a single operand.

+ #[serde(rename = "GroupByAttribute")] + pub group_by_attribute: String, + ///

The ARN of a Security Hub insight.

+ #[serde(rename = "InsightArn")] + pub insight_arn: String, + ///

The name of a Security Hub insight.

+ #[serde(rename = "Name")] + pub name: String, +} + +///

The insight result values returned by the GetInsightResults operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct InsightResultValue { + ///

The number of findings returned for each GroupByAttributeValue.

+ #[serde(rename = "Count")] + pub count: i64, + ///

The value of the attribute that the findings are grouped by for the insight whose results are returned by the GetInsightResults operation.

+ #[serde(rename = "GroupByAttributeValue")] + pub group_by_attribute_value: String, +} + +///

The insight results returned by the GetInsightResults operation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct InsightResults { + ///

The attribute that the findings are grouped by for the insight whose results are returned by the GetInsightResults operation.

+ #[serde(rename = "GroupByAttribute")] + pub group_by_attribute: String, + ///

The ARN of the insight whose results are returned by the GetInsightResults operation.

+ #[serde(rename = "InsightArn")] + pub insight_arn: String, + ///

The list of insight result values returned by the GetInsightResults operation.

+ #[serde(rename = "ResultValues")] + pub result_values: Vec, +} + +///

Details about an invitation.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct Invitation { + ///

The account ID of the Security Hub master account that the invitation was sent from.

+ #[serde(rename = "AccountId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub account_id: Option, + ///

The ID of the invitation sent to the member account.

+ #[serde(rename = "InvitationId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invitation_id: Option, + ///

The timestamp of when the invitation was sent.

+ #[serde(rename = "InvitedAt")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invited_at: Option, + ///

The current status of the association between member and master accounts.

+ #[serde(rename = "MemberStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub member_status: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct InviteMembersRequest { + ///

A list of IDs of the AWS accounts that you want to invite to Security Hub as members.

+ #[serde(rename = "AccountIds")] + #[serde(skip_serializing_if = "Option::is_none")] + pub account_ids: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct InviteMembersResponse { + ///

A list of account ID and email address pairs of the AWS accounts that couldn't be processed.

+ #[serde(rename = "UnprocessedAccounts")] + #[serde(skip_serializing_if = "Option::is_none")] + pub unprocessed_accounts: Option>, +} + +///

The IP filter for querying findings.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IpFilter { + ///

A finding's CIDR value.

+ #[serde(rename = "Cidr")] + #[serde(skip_serializing_if = "Option::is_none")] + pub cidr: Option, +} + +///

A keyword filter for querying findings.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct KeywordFilter { + ///

A value for the keyword.

+ #[serde(rename = "Value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListEnabledProductsForImportRequest { + ///

The maximum number of items that you want in the response.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

Paginates results. On your first call to the ListEnabledProductsForImport operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListEnabledProductsForImportResponse { + ///

The token that is required for pagination.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A list of ARNs for the resources that represent your subscriptions to products.

+ #[serde(rename = "ProductSubscriptions")] + #[serde(skip_serializing_if = "Option::is_none")] + pub product_subscriptions: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListInvitationsRequest { + ///

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListInvitationsRequest {
+    /// The maximum number of items that you want in the response.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// Paginates results. On your first call to the ListInvitations operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListInvitationsResponse {
+    /// The details of the invitations returned by the operation.
+    #[serde(rename = "Invitations")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub invitations: Option<Vec<Invitation>>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListMembersRequest {
+    /// The maximum number of items that you want in the response.
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// Paginates results. Set the value of this parameter to NULL on your first call to the ListMembers operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// Specifies which member accounts the response includes based on their relationship status with the master account. The default value is TRUE. If onlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED or DISABLED. If onlyAssociated is set to FALSE, the response includes all existing member accounts.
+    #[serde(rename = "OnlyAssociated")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub only_associated: Option<bool>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListMembersResponse {
+    /// Member details returned by the operation.
+    #[serde(rename = "Members")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub members: Option<Vec<Member>>,
+    /// The token that is required for pagination.
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListTagsForResourceRequest {
+    /// The ARN of the resource to retrieve tags for.
+    #[serde(rename = "ResourceArn")]
+    pub resource_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListTagsForResourceResponse {
+    /// The tags associated with a resource.
+    #[serde(rename = "Tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<::std::collections::HashMap<String, String>>,
+}
+
+/// A list of malware related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Malware {
+    /// The name of the malware that was observed.
+    #[serde(rename = "Name")]
+    pub name: String,
+    /// The file system path of the malware that was observed.
+    #[serde(rename = "Path")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+    /// The state of the malware that was observed.
+    #[serde(rename = "State")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub state: Option<String>,
+    /// The type of the malware that was observed.
+    #[serde(rename = "Type")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub type_: Option<String>,
+}

+/// The map filter for querying findings.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct MapFilter {
+    /// The condition to apply to a key value when querying for findings with a map filter.
+    #[serde(rename = "Comparison")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comparison: Option<String>,
+    /// The key of the map filter.
+    #[serde(rename = "Key")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub key: Option<String>,
+    /// The value for the key in the map filter.
+    #[serde(rename = "Value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
+/// The details about a member account.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Member {
+    /// The AWS account ID of the member account.
+    #[serde(rename = "AccountId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_id: Option<String>,
+    /// The email address of the member account.
+    #[serde(rename = "Email")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub email: Option<String>,
+    /// A timestamp for the date and time when the invitation was sent to the member account.
+    #[serde(rename = "InvitedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub invited_at: Option<f64>,
+    /// The AWS account ID of the Security Hub master account associated with this member account.
+    #[serde(rename = "MasterId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub master_id: Option<String>,
+    /// The status of the relationship between the member account and its master account.
+    #[serde(rename = "MemberStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub member_status: Option<String>,
+    /// The timestamp for the date and time when the member account was updated.
+    #[serde(rename = "UpdatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub updated_at: Option<f64>,
+}

+/// The details of network-related information about a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Network {
+    /// The destination domain of network-related information about a finding.
+    #[serde(rename = "DestinationDomain")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination_domain: Option<String>,
+    /// The destination IPv4 address of network-related information about a finding.
+    #[serde(rename = "DestinationIpV4")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination_ip_v4: Option<String>,
+    /// The destination IPv6 address of network-related information about a finding.
+    #[serde(rename = "DestinationIpV6")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination_ip_v6: Option<String>,
+    /// The destination port of network-related information about a finding.
+    #[serde(rename = "DestinationPort")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination_port: Option<i64>,
+    /// The direction of network traffic associated with a finding.
+    #[serde(rename = "Direction")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub direction: Option<String>,
+    /// The protocol of network-related information about a finding.
+    #[serde(rename = "Protocol")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub protocol: Option<String>,
+    /// The source domain of network-related information about a finding.
+    #[serde(rename = "SourceDomain")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_domain: Option<String>,
+    /// The source IPv4 address of network-related information about a finding.
+    #[serde(rename = "SourceIpV4")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_ip_v4: Option<String>,
+    /// The source IPv6 address of network-related information about a finding.
+    #[serde(rename = "SourceIpV6")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_ip_v6: Option<String>,
+    /// The source media access control (MAC) address of network-related information about a finding.
+    #[serde(rename = "SourceMac")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_mac: Option<String>,
+    /// The source port of network-related information about a finding.
+    #[serde(rename = "SourcePort")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_port: Option<i64>,
+}

+/// A user-defined note added to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Note {
+    /// The text of a note.
+    #[serde(rename = "Text")]
+    pub text: String,
+    /// The timestamp of when the note was updated.
+    #[serde(rename = "UpdatedAt")]
+    pub updated_at: String,
+    /// The principal that created a note.
+    #[serde(rename = "UpdatedBy")]
+    pub updated_by: String,
+}
+
+/// The updated note.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct NoteUpdate {
+    /// The updated note text.
+    #[serde(rename = "Text")]
+    pub text: String,
+    /// The principal that updated the note.
+    #[serde(rename = "UpdatedBy")]
+    pub updated_by: String,
+}
+
+/// A number filter for querying findings.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct NumberFilter {
+    /// The equal-to condition to be applied to a single field when querying for findings.
+    #[serde(rename = "Eq")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub eq: Option<f64>,
+    /// The greater-than-equal condition to be applied to a single field when querying for findings.
+    #[serde(rename = "Gte")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub gte: Option<f64>,
+    /// The less-than-equal condition to be applied to a single field when querying for findings.
+    #[serde(rename = "Lte")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lte: Option<f64>,
+}

+/// The details of process-related information about a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ProcessDetails {
+    /// The date/time that the process was launched.
+    #[serde(rename = "LaunchedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub launched_at: Option<String>,
+    /// The name of the process.
+    #[serde(rename = "Name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// The parent process ID.
+    #[serde(rename = "ParentPid")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub parent_pid: Option<i64>,
+    /// The path to the process executable.
+    #[serde(rename = "Path")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+    /// The process ID.
+    #[serde(rename = "Pid")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pid: Option<i64>,
+    /// The date and time when the process was terminated.
+    #[serde(rename = "TerminatedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub terminated_at: Option<String>,
+}

+/// Contains details about a product.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Product {
+    /// The URL used to activate the product.
+    #[serde(rename = "ActivationUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub activation_url: Option<String>,
+    /// The categories assigned to the product.
+    #[serde(rename = "Categories")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub categories: Option<Vec<String>>,
+    /// The name of the company that provides the product.
+    #[serde(rename = "CompanyName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub company_name: Option<String>,
+    /// A description of the product.
+    #[serde(rename = "Description")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    /// The URL for the page that contains more information about the product.
+    #[serde(rename = "MarketplaceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub marketplace_url: Option<String>,
+    /// The ARN assigned to the product.
+    #[serde(rename = "ProductArn")]
+    pub product_arn: String,
+    /// The name of the product.
+    #[serde(rename = "ProductName")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_name: Option<String>,
+    /// The resource policy associated with the product.
+    #[serde(rename = "ProductSubscriptionResourcePolicy")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product_subscription_resource_policy: Option<String>,
+}

+/// A recommendation on how to remediate the issue identified in a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Recommendation {
+    /// Describes the recommended steps to take to remediate an issue identified in a finding.
+    #[serde(rename = "Text")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub text: Option<String>,
+    /// A URL to a page or site that contains information about how to remediate a finding.
+    #[serde(rename = "Url")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub url: Option<String>,
+}
+
+/// Details about a related finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct RelatedFinding {
+    /// The product-generated identifier for a related finding.
+    #[serde(rename = "Id")]
+    pub id: String,
+    /// The ARN of the product that generated a related finding.
+    #[serde(rename = "ProductArn")]
+    pub product_arn: String,
+}
+
+/// Details about the remediation steps for a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Remediation {
+    /// A recommendation on the steps to take to remediate the issue identified by a finding.
+    #[serde(rename = "Recommendation")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub recommendation: Option<Recommendation>,
+}

+/// A resource related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Resource {
+    /// Additional details about the resource related to a finding.
+    #[serde(rename = "Details")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub details: Option<ResourceDetails>,
+    /// The canonical identifier for the given resource type.
+    #[serde(rename = "Id")]
+    pub id: String,
+    /// The canonical AWS partition name that the Region is assigned to.
+    #[serde(rename = "Partition")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub partition: Option<String>,
+    /// The canonical AWS external Region name where this resource is located.
+    #[serde(rename = "Region")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub region: Option<String>,
+    /// A list of AWS tags associated with a resource at the time the finding was processed.
+    #[serde(rename = "Tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<::std::collections::HashMap<String, String>>,
+    /// The type of the resource that details are provided for.
+    #[serde(rename = "Type")]
+    pub type_: String,
+}

+/// Additional details about a resource related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ResourceDetails {
+    /// Details about an Amazon EC2 instance related to a finding.
+    #[serde(rename = "AwsEc2Instance")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub aws_ec_2_instance: Option<AwsEc2InstanceDetails>,
+    /// Details about an IAM access key related to a finding.
+    #[serde(rename = "AwsIamAccessKey")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub aws_iam_access_key: Option<AwsIamAccessKeyDetails>,
+    /// Details about an Amazon S3 Bucket related to a finding.
+    #[serde(rename = "AwsS3Bucket")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub aws_s3_bucket: Option<AwsS3BucketDetails>,
+    /// Details about a container resource related to a finding.
+    #[serde(rename = "Container")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub container: Option<ContainerDetails>,
+    /// Details about a resource that doesn't have a specific type defined.
+    #[serde(rename = "Other")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub other: Option<::std::collections::HashMap<String, String>>,
+}

+/// Details about the account that wasn't processed.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Result {
+    /// An AWS account ID of the account that wasn't processed.
+    #[serde(rename = "AccountId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_id: Option<String>,
+    /// The reason that the account wasn't processed.
+    #[serde(rename = "ProcessingResult")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub processing_result: Option<String>,
+}
+
+/// The severity of the finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Severity {
+    /// The normalized severity of a finding.
+    #[serde(rename = "Normalized")]
+    pub normalized: i64,
+    /// The native severity as defined by the AWS service or integrated partner product that generated the finding.
+    #[serde(rename = "Product")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub product: Option<f64>,
+}
+
+/// A collection of finding attributes used to sort findings.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct SortCriterion {
+    /// The finding attribute used to sort findings.
+    #[serde(rename = "Field")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub field: Option<String>,
+    /// The order used to sort findings.
+    #[serde(rename = "SortOrder")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sort_order: Option<String>,
+}

+/// A resource that represents your subscription to a supported standard.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StandardsSubscription {
+    /// The ARN of a standard. In this release, Security Hub supports only the CIS AWS Foundations standard, which uses the following ARN: arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0.
+    #[serde(rename = "StandardsArn")]
+    pub standards_arn: String,
+    /// A key-value pair of input for the standard.
+    #[serde(rename = "StandardsInput")]
+    pub standards_input: ::std::collections::HashMap<String, String>,
+    /// The status of the standards subscription.
+    #[serde(rename = "StandardsStatus")]
+    pub standards_status: String,
+    /// The ARN of a resource that represents your subscription to a supported standard.
+    #[serde(rename = "StandardsSubscriptionArn")]
+    pub standards_subscription_arn: String,
+}
+
+/// The standard that you want to enable.
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct StandardsSubscriptionRequest {
+    /// The ARN of the standard that you want to enable. In this release, Security Hub only supports the CIS AWS Foundations standard. Its ARN is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0.
+    #[serde(rename = "StandardsArn")]
+    pub standards_arn: String,
+    /// A key-value pair of input for the standard.
+    #[serde(rename = "StandardsInput")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub standards_input: Option<::std::collections::HashMap<String, String>>,
+}

+/// A string filter for querying findings.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct StringFilter {
+    /// The condition to be applied to a string value when querying for findings.
+    #[serde(rename = "Comparison")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comparison: Option<String>,
+    /// The string filter value.
+    #[serde(rename = "Value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct TagResourceRequest {
+    /// The ARN of the resource to apply the tags to.
+    #[serde(rename = "ResourceArn")]
+    pub resource_arn: String,
+    /// The tags to add to the resource.
+    #[serde(rename = "Tags")]
+    pub tags: ::std::collections::HashMap<String, String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct TagResourceResponse {}
+
+/// Details about the threat intel related to a finding.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ThreatIntelIndicator {
+    /// The category of a threat intel indicator.
+    #[serde(rename = "Category")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub category: Option<String>,
+    /// The date and time when the most recent instance of a threat intel indicator was observed.
+    #[serde(rename = "LastObservedAt")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_observed_at: Option<String>,
+    /// The source of the threat intel indicator.
+    #[serde(rename = "Source")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source: Option<String>,
+    /// The URL to the page or site where you can get more information about the threat intel indicator.
+    #[serde(rename = "SourceUrl")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_url: Option<String>,
+    /// The type of a threat intel indicator.
+    #[serde(rename = "Type")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub type_: Option<String>,
+    /// The value of a threat intel indicator.
+    #[serde(rename = "Value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UntagResourceRequest {
+    /// The ARN of the resource to remove the tags from.
+    #[serde(rename = "ResourceArn")]
+    pub resource_arn: String,
+    /// The tag keys associated with the tags to remove from the resource.
+    #[serde(rename = "TagKeys")]
+    pub tag_keys: Vec<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UntagResourceResponse {}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UpdateActionTargetRequest {
+    /// The ARN of the custom action target to update.
+    #[serde(rename = "ActionTargetArn")]
+    pub action_target_arn: String,
+    /// The updated description for the custom action target.
+    #[serde(rename = "Description")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    /// The updated name of the custom action target.
+    #[serde(rename = "Name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UpdateActionTargetResponse {}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UpdateFindingsRequest {
+    /// A collection of attributes that specify which findings you want to update.
+    #[serde(rename = "Filters")]
+    pub filters: AwsSecurityFindingFilters,
+    /// The updated note for the finding.
+    #[serde(rename = "Note")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub note: Option<NoteUpdate>,
+    /// The updated record state for the finding.
+    #[serde(rename = "RecordState")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub record_state: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UpdateFindingsResponse {}

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct UpdateInsightRequest {
+    /// The updated filters that define this insight.
+    #[serde(rename = "Filters")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub filters: Option<AwsSecurityFindingFilters>,
+    /// The updated GroupBy attribute that defines this insight.
+    #[serde(rename = "GroupByAttribute")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub group_by_attribute: Option<String>,
+    /// The ARN of the insight that you want to update.
+    #[serde(rename = "InsightArn")]
+    pub insight_arn: String,
+    /// The updated name for the insight.
+    #[serde(rename = "Name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UpdateInsightResponse {}
+
+/// Errors returned by AcceptInvitation
+#[derive(Debug, PartialEq)]
+pub enum AcceptInvitationError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl AcceptInvitationError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<AcceptInvitationError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(AcceptInvitationError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(AcceptInvitationError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(AcceptInvitationError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(AcceptInvitationError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(AcceptInvitationError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for AcceptInvitationError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for AcceptInvitationError {
+    fn description(&self) -> &str {
+        match *self {
+            AcceptInvitationError::Internal(ref cause) => cause,
+            AcceptInvitationError::InvalidAccess(ref cause) => cause,
+            AcceptInvitationError::InvalidInput(ref cause) => cause,
+            AcceptInvitationError::LimitExceeded(ref cause) => cause,
+            AcceptInvitationError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
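Every operation's error type below follows the same shape: service-specific variants wrapped in `RusotoError::Service`, `ValidationException` mapped to `RusotoError::Validation`, and anything unrecognized surfaced as `RusotoError::Unknown`. A sketch of handling an `AcceptInvitation` result; the `AcceptInvitationResponse` type is assumed from earlier in this file:

use rusoto_core::RusotoError;

fn report(result: Result<AcceptInvitationResponse, RusotoError<AcceptInvitationError>>) {
    match result {
        Ok(_) => println!("invitation accepted"),
        Err(RusotoError::Service(AcceptInvitationError::ResourceNotFound(msg))) => {
            eprintln!("no such invitation: {}", msg);
        }
        // Display for the error enum prints the underlying service message.
        Err(RusotoError::Service(other)) => eprintln!("service error: {}", other),
        Err(other) => eprintln!("request failed: {:?}", other),
    }
}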

+/// Errors returned by BatchDisableStandards
+#[derive(Debug, PartialEq)]
+pub enum BatchDisableStandardsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl BatchDisableStandardsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<BatchDisableStandardsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(BatchDisableStandardsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(BatchDisableStandardsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(BatchDisableStandardsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(BatchDisableStandardsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for BatchDisableStandardsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for BatchDisableStandardsError {
+    fn description(&self) -> &str {
+        match *self {
+            BatchDisableStandardsError::Internal(ref cause) => cause,
+            BatchDisableStandardsError::InvalidAccess(ref cause) => cause,
+            BatchDisableStandardsError::InvalidInput(ref cause) => cause,
+            BatchDisableStandardsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by BatchEnableStandards
+#[derive(Debug, PartialEq)]
+pub enum BatchEnableStandardsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl BatchEnableStandardsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<BatchEnableStandardsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(BatchEnableStandardsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(BatchEnableStandardsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(BatchEnableStandardsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(BatchEnableStandardsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for BatchEnableStandardsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for BatchEnableStandardsError {
+    fn description(&self) -> &str {
+        match *self {
+            BatchEnableStandardsError::Internal(ref cause) => cause,
+            BatchEnableStandardsError::InvalidAccess(ref cause) => cause,
+            BatchEnableStandardsError::InvalidInput(ref cause) => cause,
+            BatchEnableStandardsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by BatchImportFindings
+#[derive(Debug, PartialEq)]
+pub enum BatchImportFindingsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl BatchImportFindingsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<BatchImportFindingsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(BatchImportFindingsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(BatchImportFindingsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(BatchImportFindingsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(BatchImportFindingsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for BatchImportFindingsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for BatchImportFindingsError {
+    fn description(&self) -> &str {
+        match *self {
+            BatchImportFindingsError::Internal(ref cause) => cause,
+            BatchImportFindingsError::InvalidAccess(ref cause) => cause,
+            BatchImportFindingsError::InvalidInput(ref cause) => cause,
+            BatchImportFindingsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by CreateActionTarget
+#[derive(Debug, PartialEq)]
+pub enum CreateActionTargetError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The resource specified in the request conflicts with an existing resource.
+    ResourceConflict(String),
+}
+
+impl CreateActionTargetError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateActionTargetError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(CreateActionTargetError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(CreateActionTargetError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(CreateActionTargetError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateActionTargetError::LimitExceeded(err.msg)),
+                "ResourceConflictException" => return RusotoError::Service(CreateActionTargetError::ResourceConflict(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for CreateActionTargetError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateActionTargetError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateActionTargetError::Internal(ref cause) => cause,
+            CreateActionTargetError::InvalidAccess(ref cause) => cause,
+            CreateActionTargetError::InvalidInput(ref cause) => cause,
+            CreateActionTargetError::LimitExceeded(ref cause) => cause,
+            CreateActionTargetError::ResourceConflict(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by CreateInsight
+#[derive(Debug, PartialEq)]
+pub enum CreateInsightError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The resource specified in the request conflicts with an existing resource.
+    ResourceConflict(String),
+}
+
+impl CreateInsightError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateInsightError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(CreateInsightError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(CreateInsightError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(CreateInsightError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateInsightError::LimitExceeded(err.msg)),
+                "ResourceConflictException" => return RusotoError::Service(CreateInsightError::ResourceConflict(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for CreateInsightError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateInsightError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateInsightError::Internal(ref cause) => cause,
+            CreateInsightError::InvalidAccess(ref cause) => cause,
+            CreateInsightError::InvalidInput(ref cause) => cause,
+            CreateInsightError::LimitExceeded(ref cause) => cause,
+            CreateInsightError::ResourceConflict(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by CreateMembers
+#[derive(Debug, PartialEq)]
+pub enum CreateMembersError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The resource specified in the request conflicts with an existing resource.
+    ResourceConflict(String),
+}
+
+impl CreateMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(CreateMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(CreateMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(CreateMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(CreateMembersError::LimitExceeded(err.msg)),
+                "ResourceConflictException" => return RusotoError::Service(CreateMembersError::ResourceConflict(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for CreateMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateMembersError::Internal(ref cause) => cause,
+            CreateMembersError::InvalidAccess(ref cause) => cause,
+            CreateMembersError::InvalidInput(ref cause) => cause,
+            CreateMembersError::LimitExceeded(ref cause) => cause,
+            CreateMembersError::ResourceConflict(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeclineInvitations
+#[derive(Debug, PartialEq)]
+pub enum DeclineInvitationsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DeclineInvitationsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeclineInvitationsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DeclineInvitationsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DeclineInvitationsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DeclineInvitationsError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeclineInvitationsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DeclineInvitationsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeclineInvitationsError {
+    fn description(&self) -> &str {
+        match *self {
+            DeclineInvitationsError::Internal(ref cause) => cause,
+            DeclineInvitationsError::InvalidAccess(ref cause) => cause,
+            DeclineInvitationsError::InvalidInput(ref cause) => cause,
+            DeclineInvitationsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by DeleteActionTarget
+#[derive(Debug, PartialEq)]
+pub enum DeleteActionTargetError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DeleteActionTargetError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteActionTargetError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DeleteActionTargetError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DeleteActionTargetError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DeleteActionTargetError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeleteActionTargetError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DeleteActionTargetError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteActionTargetError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteActionTargetError::Internal(ref cause) => cause,
+            DeleteActionTargetError::InvalidAccess(ref cause) => cause,
+            DeleteActionTargetError::InvalidInput(ref cause) => cause,
+            DeleteActionTargetError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteInsight
+#[derive(Debug, PartialEq)]
+pub enum DeleteInsightError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DeleteInsightError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteInsightError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DeleteInsightError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DeleteInsightError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DeleteInsightError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DeleteInsightError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeleteInsightError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DeleteInsightError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteInsightError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteInsightError::Internal(ref cause) => cause,
+            DeleteInsightError::InvalidAccess(ref cause) => cause,
+            DeleteInsightError::InvalidInput(ref cause) => cause,
+            DeleteInsightError::LimitExceeded(ref cause) => cause,
+            DeleteInsightError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by DeleteInvitations
+#[derive(Debug, PartialEq)]
+pub enum DeleteInvitationsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DeleteInvitationsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteInvitationsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DeleteInvitationsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DeleteInvitationsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DeleteInvitationsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DeleteInvitationsError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeleteInvitationsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DeleteInvitationsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteInvitationsError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteInvitationsError::Internal(ref cause) => cause,
+            DeleteInvitationsError::InvalidAccess(ref cause) => cause,
+            DeleteInvitationsError::InvalidInput(ref cause) => cause,
+            DeleteInvitationsError::LimitExceeded(ref cause) => cause,
+            DeleteInvitationsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteMembers
+#[derive(Debug, PartialEq)]
+pub enum DeleteMembersError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DeleteMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DeleteMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DeleteMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DeleteMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DeleteMembersError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DeleteMembersError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DeleteMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteMembersError::Internal(ref cause) => cause,
+            DeleteMembersError::InvalidAccess(ref cause) => cause,
+            DeleteMembersError::InvalidInput(ref cause) => cause,
+            DeleteMembersError::LimitExceeded(ref cause) => cause,
+            DeleteMembersError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by DescribeActionTargets
+#[derive(Debug, PartialEq)]
+pub enum DescribeActionTargetsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DescribeActionTargetsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeActionTargetsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DescribeActionTargetsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DescribeActionTargetsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DescribeActionTargetsError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DescribeActionTargetsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DescribeActionTargetsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeActionTargetsError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeActionTargetsError::Internal(ref cause) => cause,
+            DescribeActionTargetsError::InvalidAccess(ref cause) => cause,
+            DescribeActionTargetsError::InvalidInput(ref cause) => cause,
+            DescribeActionTargetsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeHub
+#[derive(Debug, PartialEq)]
+pub enum DescribeHubError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DescribeHubError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeHubError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DescribeHubError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DescribeHubError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DescribeHubError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DescribeHubError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DescribeHubError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DescribeHubError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeHubError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeHubError::Internal(ref cause) => cause,
+            DescribeHubError::InvalidAccess(ref cause) => cause,
+            DescribeHubError::InvalidInput(ref cause) => cause,
+            DescribeHubError::LimitExceeded(ref cause) => cause,
+            DescribeHubError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by DescribeProducts
+#[derive(Debug, PartialEq)]
+pub enum DescribeProductsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl DescribeProductsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeProductsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DescribeProductsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DescribeProductsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DescribeProductsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DescribeProductsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DescribeProductsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeProductsError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeProductsError::Internal(ref cause) => cause,
+            DescribeProductsError::InvalidAccess(ref cause) => cause,
+            DescribeProductsError::InvalidInput(ref cause) => cause,
+            DescribeProductsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DisableImportFindingsForProduct
+#[derive(Debug, PartialEq)]
+pub enum DisableImportFindingsForProductError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DisableImportFindingsForProductError {
+    pub fn from_response(
+        res: BufferedHttpResponse,
+    ) -> RusotoError<DisableImportFindingsForProductError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DisableImportFindingsForProductError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DisableImportFindingsForProductError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DisableImportFindingsForProductError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DisableImportFindingsForProductError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DisableImportFindingsForProductError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DisableImportFindingsForProductError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DisableImportFindingsForProductError {
+    fn description(&self) -> &str {
+        match *self {
+            DisableImportFindingsForProductError::Internal(ref cause) => cause,
+            DisableImportFindingsForProductError::InvalidAccess(ref cause) => cause,
+            DisableImportFindingsForProductError::InvalidInput(ref cause) => cause,
+            DisableImportFindingsForProductError::LimitExceeded(ref cause) => cause,
+            DisableImportFindingsForProductError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by DisableSecurityHub
+#[derive(Debug, PartialEq)]
+pub enum DisableSecurityHubError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DisableSecurityHubError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DisableSecurityHubError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DisableSecurityHubError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DisableSecurityHubError::InvalidAccess(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DisableSecurityHubError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DisableSecurityHubError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DisableSecurityHubError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DisableSecurityHubError {
+    fn description(&self) -> &str {
+        match *self {
+            DisableSecurityHubError::Internal(ref cause) => cause,
+            DisableSecurityHubError::InvalidAccess(ref cause) => cause,
+            DisableSecurityHubError::LimitExceeded(ref cause) => cause,
+            DisableSecurityHubError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DisassociateFromMasterAccount
+#[derive(Debug, PartialEq)]
+pub enum DisassociateFromMasterAccountError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl DisassociateFromMasterAccountError {
+    pub fn from_response(
+        res: BufferedHttpResponse,
+    ) -> RusotoError<DisassociateFromMasterAccountError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DisassociateFromMasterAccountError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DisassociateFromMasterAccountError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DisassociateFromMasterAccountError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DisassociateFromMasterAccountError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DisassociateFromMasterAccountError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+}
+impl fmt::Display for DisassociateFromMasterAccountError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DisassociateFromMasterAccountError {
+    fn description(&self) -> &str {
+        match *self {
+            DisassociateFromMasterAccountError::Internal(ref cause) => cause,
+            DisassociateFromMasterAccountError::InvalidAccess(ref cause) => cause,
+            DisassociateFromMasterAccountError::InvalidInput(ref cause) => cause,
+            DisassociateFromMasterAccountError::LimitExceeded(ref cause) => cause,
+            DisassociateFromMasterAccountError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}

Internal server error.

+ Internal(String), + ///

AWS Security Hub isn't enabled for the account used to make this request.

+ InvalidAccess(String), + ///

The request was rejected because you supplied an invalid or out-of-range value for an input parameter.

+ InvalidInput(String), + ///

The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.

+ LimitExceeded(String), + ///

The request was rejected because we can't find the specified resource.

+    ResourceNotFound(String),
+}
+
+impl DisassociateMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DisassociateMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(DisassociateMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(DisassociateMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(DisassociateMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(DisassociateMembersError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(DisassociateMembersError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DisassociateMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DisassociateMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            DisassociateMembersError::Internal(ref cause) => cause,
+            DisassociateMembersError::InvalidAccess(ref cause) => cause,
+            DisassociateMembersError::InvalidInput(ref cause) => cause,
+            DisassociateMembersError::LimitExceeded(ref cause) => cause,
+            DisassociateMembersError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by EnableImportFindingsForProduct
+#[derive(Debug, PartialEq)]
+pub enum EnableImportFindingsForProductError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The resource specified in the request conflicts with an existing resource.
+    ResourceConflict(String),
+}
+
+impl EnableImportFindingsForProductError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<EnableImportFindingsForProductError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(EnableImportFindingsForProductError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(EnableImportFindingsForProductError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(EnableImportFindingsForProductError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(EnableImportFindingsForProductError::LimitExceeded(err.msg)),
+                "ResourceConflictException" => return RusotoError::Service(EnableImportFindingsForProductError::ResourceConflict(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for EnableImportFindingsForProductError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for EnableImportFindingsForProductError {
+    fn description(&self) -> &str {
+        match *self {
+            EnableImportFindingsForProductError::Internal(ref cause) => cause,
+            EnableImportFindingsForProductError::InvalidAccess(ref cause) => cause,
+            EnableImportFindingsForProductError::InvalidInput(ref cause) => cause,
+            EnableImportFindingsForProductError::LimitExceeded(ref cause) => cause,
+            EnableImportFindingsForProductError::ResourceConflict(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by EnableSecurityHub
+#[derive(Debug, PartialEq)]
+pub enum EnableSecurityHubError {
+    /// You don't have permission to perform the action specified in the request.
+    AccessDenied(String),
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The resource specified in the request conflicts with an existing resource.
+    ResourceConflict(String),
+}
+
+impl EnableSecurityHubError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<EnableSecurityHubError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => return RusotoError::Service(EnableSecurityHubError::AccessDenied(err.msg)),
+                "InternalException" => return RusotoError::Service(EnableSecurityHubError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(EnableSecurityHubError::InvalidAccess(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(EnableSecurityHubError::LimitExceeded(err.msg)),
+                "ResourceConflictException" => return RusotoError::Service(EnableSecurityHubError::ResourceConflict(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for EnableSecurityHubError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for EnableSecurityHubError {
+    fn description(&self) -> &str {
+        match *self {
+            EnableSecurityHubError::AccessDenied(ref cause) => cause,
+            EnableSecurityHubError::Internal(ref cause) => cause,
+            EnableSecurityHubError::InvalidAccess(ref cause) => cause,
+            EnableSecurityHubError::LimitExceeded(ref cause) => cause,
+            EnableSecurityHubError::ResourceConflict(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetEnabledStandards
+#[derive(Debug, PartialEq)]
+pub enum GetEnabledStandardsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl GetEnabledStandardsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetEnabledStandardsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetEnabledStandardsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetEnabledStandardsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetEnabledStandardsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetEnabledStandardsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetEnabledStandardsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetEnabledStandardsError {
+    fn description(&self) -> &str {
+        match *self {
+            GetEnabledStandardsError::Internal(ref cause) => cause,
+            GetEnabledStandardsError::InvalidAccess(ref cause) => cause,
+            GetEnabledStandardsError::InvalidInput(ref cause) => cause,
+            GetEnabledStandardsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetFindings
+#[derive(Debug, PartialEq)]
+pub enum GetFindingsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl GetFindingsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetFindingsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetFindingsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetFindingsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetFindingsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetFindingsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetFindingsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetFindingsError {
+    fn description(&self) -> &str {
+        match *self {
+            GetFindingsError::Internal(ref cause) => cause,
+            GetFindingsError::InvalidAccess(ref cause) => cause,
+            GetFindingsError::InvalidInput(ref cause) => cause,
+            GetFindingsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetInsightResults
+#[derive(Debug, PartialEq)]
+pub enum GetInsightResultsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl GetInsightResultsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetInsightResultsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetInsightResultsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetInsightResultsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetInsightResultsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetInsightResultsError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetInsightResultsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetInsightResultsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetInsightResultsError {
+    fn description(&self) -> &str {
+        match *self {
+            GetInsightResultsError::Internal(ref cause) => cause,
+            GetInsightResultsError::InvalidAccess(ref cause) => cause,
+            GetInsightResultsError::InvalidInput(ref cause) => cause,
+            GetInsightResultsError::LimitExceeded(ref cause) => cause,
+            GetInsightResultsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetInsights
+#[derive(Debug, PartialEq)]
+pub enum GetInsightsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl GetInsightsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetInsightsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetInsightsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetInsightsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetInsightsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetInsightsError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetInsightsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetInsightsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetInsightsError {
+    fn description(&self) -> &str {
+        match *self {
+            GetInsightsError::Internal(ref cause) => cause,
+            GetInsightsError::InvalidAccess(ref cause) => cause,
+            GetInsightsError::InvalidInput(ref cause) => cause,
+            GetInsightsError::LimitExceeded(ref cause) => cause,
+            GetInsightsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetInvitationsCount
+#[derive(Debug, PartialEq)]
+pub enum GetInvitationsCountError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl GetInvitationsCountError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetInvitationsCountError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetInvitationsCountError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetInvitationsCountError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetInvitationsCountError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetInvitationsCountError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetInvitationsCountError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetInvitationsCountError {
+    fn description(&self) -> &str {
+        match *self {
+            GetInvitationsCountError::Internal(ref cause) => cause,
+            GetInvitationsCountError::InvalidAccess(ref cause) => cause,
+            GetInvitationsCountError::InvalidInput(ref cause) => cause,
+            GetInvitationsCountError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetMasterAccount
+#[derive(Debug, PartialEq)]
+pub enum GetMasterAccountError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl GetMasterAccountError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetMasterAccountError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetMasterAccountError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetMasterAccountError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetMasterAccountError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetMasterAccountError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetMasterAccountError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetMasterAccountError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetMasterAccountError {
+    fn description(&self) -> &str {
+        match *self {
+            GetMasterAccountError::Internal(ref cause) => cause,
+            GetMasterAccountError::InvalidAccess(ref cause) => cause,
+            GetMasterAccountError::InvalidInput(ref cause) => cause,
+            GetMasterAccountError::LimitExceeded(ref cause) => cause,
+            GetMasterAccountError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by GetMembers
+#[derive(Debug, PartialEq)]
+pub enum GetMembersError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl GetMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(GetMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(GetMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(GetMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(GetMembersError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(GetMembersError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            GetMembersError::Internal(ref cause) => cause,
+            GetMembersError::InvalidAccess(ref cause) => cause,
+            GetMembersError::InvalidInput(ref cause) => cause,
+            GetMembersError::LimitExceeded(ref cause) => cause,
+            GetMembersError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by InviteMembers
+#[derive(Debug, PartialEq)]
+pub enum InviteMembersError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl InviteMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<InviteMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(InviteMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(InviteMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(InviteMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(InviteMembersError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(InviteMembersError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for InviteMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for InviteMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            InviteMembersError::Internal(ref cause) => cause,
+            InviteMembersError::InvalidAccess(ref cause) => cause,
+            InviteMembersError::InvalidInput(ref cause) => cause,
+            InviteMembersError::LimitExceeded(ref cause) => cause,
+            InviteMembersError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListEnabledProductsForImport
+#[derive(Debug, PartialEq)]
+pub enum ListEnabledProductsForImportError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl ListEnabledProductsForImportError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListEnabledProductsForImportError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(ListEnabledProductsForImportError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(ListEnabledProductsForImportError::InvalidAccess(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(ListEnabledProductsForImportError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListEnabledProductsForImportError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListEnabledProductsForImportError {
+    fn description(&self) -> &str {
+        match *self {
+            ListEnabledProductsForImportError::Internal(ref cause) => cause,
+            ListEnabledProductsForImportError::InvalidAccess(ref cause) => cause,
+            ListEnabledProductsForImportError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListInvitations
+#[derive(Debug, PartialEq)]
+pub enum ListInvitationsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl ListInvitationsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListInvitationsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(ListInvitationsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(ListInvitationsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(ListInvitationsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(ListInvitationsError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListInvitationsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListInvitationsError {
+    fn description(&self) -> &str {
+        match *self {
+            ListInvitationsError::Internal(ref cause) => cause,
+            ListInvitationsError::InvalidAccess(ref cause) => cause,
+            ListInvitationsError::InvalidInput(ref cause) => cause,
+            ListInvitationsError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListMembers
+#[derive(Debug, PartialEq)]
+pub enum ListMembersError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+}
+
+impl ListMembersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListMembersError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(ListMembersError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(ListMembersError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(ListMembersError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(ListMembersError::LimitExceeded(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListMembersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListMembersError {
+    fn description(&self) -> &str {
+        match *self {
+            ListMembersError::Internal(ref cause) => cause,
+            ListMembersError::InvalidAccess(ref cause) => cause,
+            ListMembersError::InvalidInput(ref cause) => cause,
+            ListMembersError::LimitExceeded(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListTagsForResource
+#[derive(Debug, PartialEq)]
+pub enum ListTagsForResourceError {
+    /// Internal server error.
+    Internal(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl ListTagsForResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(ListTagsForResourceError::Internal(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(ListTagsForResourceError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(ListTagsForResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListTagsForResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListTagsForResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            ListTagsForResourceError::Internal(ref cause) => cause,
+            ListTagsForResourceError::InvalidInput(ref cause) => cause,
+            ListTagsForResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by TagResource
+#[derive(Debug, PartialEq)]
+pub enum TagResourceError {
+    /// Internal server error.
+    Internal(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl TagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(TagResourceError::Internal(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(TagResourceError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(TagResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for TagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for TagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            TagResourceError::Internal(ref cause) => cause,
+            TagResourceError::InvalidInput(ref cause) => cause,
+            TagResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UntagResource
+#[derive(Debug, PartialEq)]
+pub enum UntagResourceError {
+    /// Internal server error.
+    Internal(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl UntagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(UntagResourceError::Internal(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(UntagResourceError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UntagResourceError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UntagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UntagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            UntagResourceError::Internal(ref cause) => cause,
+            UntagResourceError::InvalidInput(ref cause) => cause,
+            UntagResourceError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateActionTarget
+#[derive(Debug, PartialEq)]
+pub enum UpdateActionTargetError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl UpdateActionTargetError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateActionTargetError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(UpdateActionTargetError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(UpdateActionTargetError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(UpdateActionTargetError::InvalidInput(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UpdateActionTargetError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateActionTargetError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateActionTargetError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateActionTargetError::Internal(ref cause) => cause,
+            UpdateActionTargetError::InvalidAccess(ref cause) => cause,
+            UpdateActionTargetError::InvalidInput(ref cause) => cause,
+            UpdateActionTargetError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateFindings
+#[derive(Debug, PartialEq)]
+pub enum UpdateFindingsError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl UpdateFindingsError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateFindingsError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(UpdateFindingsError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(UpdateFindingsError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(UpdateFindingsError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(UpdateFindingsError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UpdateFindingsError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateFindingsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateFindingsError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateFindingsError::Internal(ref cause) => cause,
+            UpdateFindingsError::InvalidAccess(ref cause) => cause,
+            UpdateFindingsError::InvalidInput(ref cause) => cause,
+            UpdateFindingsError::LimitExceeded(ref cause) => cause,
+            UpdateFindingsError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateInsight
+#[derive(Debug, PartialEq)]
+pub enum UpdateInsightError {
+    /// Internal server error.
+    Internal(String),
+    /// AWS Security Hub isn't enabled for the account used to make this request.
+    InvalidAccess(String),
+    /// The request was rejected because you supplied an invalid or out-of-range value for an input parameter.
+    InvalidInput(String),
+    /// The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.
+    LimitExceeded(String),
+    /// The request was rejected because we can't find the specified resource.
+    ResourceNotFound(String),
+}
+
+impl UpdateInsightError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateInsightError> {
+        if let Some(err) = proto::json::Error::parse_rest(&res) {
+            match err.typ.as_str() {
+                "InternalException" => return RusotoError::Service(UpdateInsightError::Internal(err.msg)),
+                "InvalidAccessException" => return RusotoError::Service(UpdateInsightError::InvalidAccess(err.msg)),
+                "InvalidInputException" => return RusotoError::Service(UpdateInsightError::InvalidInput(err.msg)),
+                "LimitExceededException" => return RusotoError::Service(UpdateInsightError::LimitExceeded(err.msg)),
+                "ResourceNotFoundException" => return RusotoError::Service(UpdateInsightError::ResourceNotFound(err.msg)),
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateInsightError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateInsightError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateInsightError::Internal(ref cause) => cause,
+            UpdateInsightError::InvalidAccess(ref cause) => cause,
+            UpdateInsightError::InvalidInput(ref cause) => cause,
+            UpdateInsightError::LimitExceeded(ref cause) => cause,
+            UpdateInsightError::ResourceNotFound(ref cause) => cause,
+        }
+    }
+}
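+// Editorial note: every error type above follows the same shape (one variant
+// per service exception, plus a Display impl backed by Error::description), so
+// failures from any operation can be logged uniformly. A minimal sketch:
+//
+//     fn log_failure<E: std::error::Error>(operation: &str, err: &E) {
+//         eprintln!("securityhub {} failed: {}", operation, err);
+//     }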
+/// Trait representing the capabilities of the AWS SecurityHub API. AWS SecurityHub clients implement this trait.
+pub trait SecurityHub {
+    /// Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from. When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.
+    fn accept_invitation(&self, input: AcceptInvitationRequest) -> RusotoFuture<AcceptInvitationResponse, AcceptInvitationError>;
+
+    /// Disables the standards specified by the provided StandardsSubscriptionArns. For more information, see Standards Supported in AWS Security Hub.
+    fn batch_disable_standards(&self, input: BatchDisableStandardsRequest) -> RusotoFuture<BatchDisableStandardsResponse, BatchDisableStandardsError>;
+
+    /// Enables the standards specified by the provided standardsArn. In this release, only CIS AWS Foundations standards are supported. For more information, see Standards Supported in AWS Security Hub.
+    fn batch_enable_standards(&self, input: BatchEnableStandardsRequest) -> RusotoFuture<BatchEnableStandardsResponse, BatchEnableStandardsError>;
+
+    /// Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub. The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.
+    fn batch_import_findings(&self, input: BatchImportFindingsRequest) -> RusotoFuture<BatchImportFindingsResponse, BatchImportFindingsError>;
+
+    /// Creates a custom action target in Security Hub. You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.
+    fn create_action_target(&self, input: CreateActionTargetRequest) -> RusotoFuture<CreateActionTargetResponse, CreateActionTargetError>;
+
+    /// Creates a custom insight in Security Hub. An insight is a consolidation of findings that relate to a security issue that requires attention or remediation. Use the GroupByAttribute to group the related findings in the insight.
+    fn create_insight(&self, input: CreateInsightRequest) -> RusotoFuture<CreateInsightResponse, CreateInsightError>;
+
+    /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. You can use the EnableSecurityHub action to enable Security Hub.
+    ///
+    /// After you use CreateMembers to create member account associations in Security Hub, you need to use the InviteMembers action, which invites the accounts to enable Security Hub and become member accounts in Security Hub. If the invitation is accepted by the account owner, the account becomes a member account in Security Hub, and a permission policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start being sent to both the member and master accounts. The workflow is sketched below.
+    ///
+    /// You can remove the association between the master and member accounts by using the DisassociateFromMasterAccount or DisassociateMembers operation.
+    fn create_members(&self, input: CreateMembersRequest) -> RusotoFuture<CreateMembersResponse, CreateMembersError>;
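+    // Editorial sketch of the master/member workflow described above: create the
+    // member associations first, then invite the same accounts. The request and
+    // AccountDetails field names are assumptions (their definitions are not part
+    // of this diff), and .sync() is the blocking adapter on RusotoFuture:
+    //
+    //     let details = vec![AccountDetails {
+    //         account_id: Some("111122223333".to_owned()),
+    //         email: Some("security@example.com".to_owned()),
+    //     }];
+    //     let mut req = CreateMembersRequest::default();
+    //     req.account_details = Some(details);
+    //     client.create_members(req).sync()?;
+    //
+    //     let mut inv = InviteMembersRequest::default();
+    //     inv.account_ids = Some(vec!["111122223333".to_owned()]);
+    //     client.invite_members(inv).sync()?;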
+
+    /// Declines invitations to become a member account.
+    fn decline_invitations(&self, input: DeclineInvitationsRequest) -> RusotoFuture<DeclineInvitationsResponse, DeclineInvitationsError>;
+
+    /// Deletes a custom action target from Security Hub. Deleting a custom action target doesn't affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action.
+    fn delete_action_target(&self, input: DeleteActionTargetRequest) -> RusotoFuture<DeleteActionTargetResponse, DeleteActionTargetError>;
+
+    /// Deletes the insight specified by the InsightArn.
+    fn delete_insight(&self, input: DeleteInsightRequest) -> RusotoFuture<DeleteInsightResponse, DeleteInsightError>;
+
+    /// Deletes invitations received by the AWS account to become a member account.
+    fn delete_invitations(&self, input: DeleteInvitationsRequest) -> RusotoFuture<DeleteInvitationsResponse, DeleteInvitationsError>;
+
+    /// Deletes the specified member accounts from Security Hub.
+    fn delete_members(&self, input: DeleteMembersRequest) -> RusotoFuture<DeleteMembersResponse, DeleteMembersError>;
+
+    /// Returns a list of the custom action targets in Security Hub in your account.
+    fn describe_action_targets(&self, input: DescribeActionTargetsRequest) -> RusotoFuture<DescribeActionTargetsResponse, DescribeActionTargetsError>;
+
+    /// Returns details about the Hub resource in your account, including the HubArn and the time when you enabled Security Hub.
+    fn describe_hub(&self, input: DescribeHubRequest) -> RusotoFuture<DescribeHubResponse, DescribeHubError>;
+
+    /// Returns information about the products available that you can subscribe to and integrate with Security Hub to consolidate findings.
+    fn describe_products(&self, input: DescribeProductsRequest) -> RusotoFuture<DescribeProductsResponse, DescribeProductsError>;
+
+    /// Disables the integration of the specified product with Security Hub. Findings from that product are no longer sent to Security Hub after the integration is disabled.
+    fn disable_import_findings_for_product(&self, input: DisableImportFindingsForProductRequest) -> RusotoFuture<DisableImportFindingsForProductResponse, DisableImportFindingsForProductError>;
+
+    /// Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub. When you disable Security Hub for a master account, it doesn't disable Security Hub for any associated member accounts.
+    ///
+    /// When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and can't be recovered. Any standards that were enabled are disabled, and your master and member account associations are removed. If you want to save your existing findings, you must export them before you disable Security Hub.
+    fn disable_security_hub(&self) -> RusotoFuture<DisableSecurityHubResponse, DisableSecurityHubError>;
+
+    /// Disassociates the current Security Hub member account from the associated master account.
+    fn disassociate_from_master_account(&self) -> RusotoFuture<DisassociateFromMasterAccountResponse, DisassociateFromMasterAccountError>;
+
+    /// Disassociates the specified member accounts from the associated master account.
+    fn disassociate_members(&self, input: DisassociateMembersRequest) -> RusotoFuture<DisassociateMembersResponse, DisassociateMembersError>;
+
+    /// Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub. When you enable a product integration, a permission policy that grants permission for the product to send findings to Security Hub is applied.
+    fn enable_import_findings_for_product(&self, input: EnableImportFindingsForProductRequest) -> RusotoFuture<EnableImportFindingsForProductResponse, EnableImportFindingsForProductError>;
+
+    /// Enables Security Hub for your account in the current Region or the Region you specify in the request. When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from AWS Config, Amazon GuardDuty, Amazon Inspector, and Amazon Macie. To learn more, see Setting Up AWS Security Hub.
+    fn enable_security_hub(&self, input: EnableSecurityHubRequest) -> RusotoFuture<EnableSecurityHubResponse, EnableSecurityHubError>;
+
+    /// Returns a list of the standards that are currently enabled.
+    fn get_enabled_standards(&self, input: GetEnabledStandardsRequest) -> RusotoFuture<GetEnabledStandardsResponse, GetEnabledStandardsError>;
+
+    /// Returns a list of findings that match the specified criteria.
+    fn get_findings(&self, input: GetFindingsRequest) -> RusotoFuture<GetFindingsResponse, GetFindingsError>;
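+    // Editorial sketch (assumption): GetFindings is a paginated call, so callers
+    // typically loop on a next-token field. The `next_token`, `findings`, and
+    // `id` names are assumptions; those shapes are not part of this diff.
+    //
+    //     let mut req = GetFindingsRequest::default();
+    //     loop {
+    //         let resp = client.get_findings(req.clone()).sync()?;
+    //         for finding in resp.findings {
+    //             println!("{}", finding.id);
+    //         }
+    //         match resp.next_token {
+    //             Some(token) => req.next_token = Some(token),
+    //             None => break,
+    //         }
+    //     }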
+
+    /// Lists the results of the Security Hub insight that the insight ARN specifies.
+    fn get_insight_results(&self, input: GetInsightResultsRequest) -> RusotoFuture<GetInsightResultsResponse, GetInsightResultsError>;
+
+    /// Lists and describes insights that insight ARNs specify.
+    fn get_insights(&self, input: GetInsightsRequest) -> RusotoFuture<GetInsightsResponse, GetInsightsError>;
+
+    /// Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation.
+    fn get_invitations_count(&self) -> RusotoFuture<GetInvitationsCountResponse, GetInvitationsCountError>;
+
+    /// Provides the details for the Security Hub master account to the current member account.
+    fn get_master_account(&self) -> RusotoFuture<GetMasterAccountResponse, GetMasterAccountError>;
+
+    /// Returns the details on the Security Hub member accounts that the account IDs specify.
+    fn get_members(&self, input: GetMembersRequest) -> RusotoFuture<GetMembersResponse, GetMembersError>;
+
+    /// Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from. Before you can use this action to invite a member, you must first create the member account in Security Hub by using the CreateMembers action. When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from the member account.
+    fn invite_members(&self, input: InviteMembersRequest) -> RusotoFuture<InviteMembersResponse, InviteMembersError>;
+
+    /// Lists all findings-generating solutions (products) whose findings you have subscribed to receive in Security Hub.
+    fn list_enabled_products_for_import(&self, input: ListEnabledProductsForImportRequest) -> RusotoFuture<ListEnabledProductsForImportResponse, ListEnabledProductsForImportError>;
+
+    /// Lists all Security Hub membership invitations that were sent to the current AWS account.
+    fn list_invitations(&self, input: ListInvitationsRequest) -> RusotoFuture<ListInvitationsResponse, ListInvitationsError>;
+
+    /// Lists details about all member accounts for the current Security Hub master account.
+    fn list_members(&self, input: ListMembersRequest) -> RusotoFuture<ListMembersResponse, ListMembersError>;
+
+    /// Returns a list of tags associated with a resource.
+    fn list_tags_for_resource(&self, input: ListTagsForResourceRequest) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError>;
+
+    /// Adds one or more tags to a resource.
+    fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<TagResourceResponse, TagResourceError>;
+
+    /// Removes one or more tags from a resource.
+    fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<UntagResourceResponse, UntagResourceError>;
+
+    /// Updates the name and description of a custom action target in Security Hub.
+    fn update_action_target(&self, input: UpdateActionTargetRequest) -> RusotoFuture<UpdateActionTargetResponse, UpdateActionTargetError>;
+
+    /// Updates the Note and RecordState of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding.
+    fn update_findings(&self, input: UpdateFindingsRequest) -> RusotoFuture<UpdateFindingsResponse, UpdateFindingsError>;
+
+    /// Updates the Security Hub insight that the insight ARN specifies.
+    fn update_insight(&self, input: UpdateInsightRequest) -> RusotoFuture<UpdateInsightResponse, UpdateInsightError>;
+}
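+// Editorial sketch: because all operations live on the SecurityHub trait,
+// callers can stay generic over the concrete client, which keeps code testable
+// against a mock implementation. The .sync() adapter and the
+// `invitations_count` field name are assumptions:
+//
+//     use rusoto_core::Region;
+//
+//     fn pending_invitations<C: SecurityHub>(hub: &C) -> Result<i64, String> {
+//         let resp = hub.get_invitations_count().sync().map_err(|e| e.to_string())?;
+//         Ok(resp.invitations_count.unwrap_or(0))
+//     }
+//
+//     let client = SecurityHubClient::new(Region::UsWest2);
+//     let pending = pending_invitations(&client);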
+/// A client for the AWS SecurityHub API.
+#[derive(Clone)]
+pub struct SecurityHubClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl SecurityHubClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> SecurityHubClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> SecurityHubClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(Client::new_with(credentials_provider, request_dispatcher), region)
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> SecurityHubClient {
+        SecurityHubClient { client, region }
+    }
+}
+
+impl SecurityHub for SecurityHubClient {
+    /// Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from. When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.
+    fn accept_invitation(
+        &self,
+        input: AcceptInvitationRequest,
+    ) -> RusotoFuture<AcceptInvitationResponse, AcceptInvitationError> {
+        let request_uri = "/master";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<AcceptInvitationResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(AcceptInvitationError::from_response(response))),
+                )
+            }
+        })
+    }
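+    // Editorial note: every generated operation follows the pattern above. It
+    // builds a SigV4-signed request against the "securityhub" endpoint,
+    // serializes the input as application/x-amz-json-1.1, then routes non-2xx
+    // responses through the operation's from_response. A minimal invocation,
+    // assuming the generated request shape derives Default:
+    //
+    //     let resp = client.accept_invitation(AcceptInvitationRequest::default()).sync()?;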
+
+    /// Disables the standards specified by the provided StandardsSubscriptionArns. For more information, see Standards Supported in AWS Security Hub.
+    fn batch_disable_standards(
+        &self,
+        input: BatchDisableStandardsRequest,
+    ) -> RusotoFuture<BatchDisableStandardsResponse, BatchDisableStandardsError> {
+        let request_uri = "/standards/deregister";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<BatchDisableStandardsResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(BatchDisableStandardsError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// Enables the standards specified by the provided standardsArn. In this release, only CIS AWS Foundations standards are supported. For more information, see Standards Supported in AWS Security Hub.
+    fn batch_enable_standards(
+        &self,
+        input: BatchEnableStandardsRequest,
+    ) -> RusotoFuture<BatchEnableStandardsResponse, BatchEnableStandardsError> {
+        let request_uri = "/standards/register";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<BatchEnableStandardsResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(BatchEnableStandardsError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub. The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.
+    fn batch_import_findings(
+        &self,
+        input: BatchImportFindingsRequest,
+    ) -> RusotoFuture<BatchImportFindingsResponse, BatchImportFindingsError> {
+        let request_uri = "/findings/import";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<BatchImportFindingsResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(BatchImportFindingsError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// Creates a custom action target in Security Hub. You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.
+    fn create_action_target(
+        &self,
+        input: CreateActionTargetRequest,
+    ) -> RusotoFuture<CreateActionTargetResponse, CreateActionTargetError> {
+        let request_uri = "/actionTargets";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateActionTargetResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateActionTargetError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// Creates a custom insight in Security Hub. An insight is a consolidation of findings that relate to a security issue that requires attention or remediation. Use the GroupByAttribute to group the related findings in the insight.
+    fn create_insight(
+        &self,
+        input: CreateInsightRequest,
+    ) -> RusotoFuture<CreateInsightResponse, CreateInsightError> {
+        let request_uri = "/insights";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateInsightResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateInsightError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. You can use the EnableSecurityHub action to enable Security Hub.
+    ///
+    /// After you use CreateMembers to create member account associations in Security Hub, you need to use the InviteMembers action, which invites the accounts to enable Security Hub and become member accounts in Security Hub. If the invitation is accepted by the account owner, the account becomes a member account in Security Hub, and a permission policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start being sent to both the member and master accounts.
+    ///
+    /// You can remove the association between the master and member accounts by using the DisassociateFromMasterAccount or DisassociateMembers operation.
+    fn create_members(
+        &self,
+        input: CreateMembersRequest,
+    ) -> RusotoFuture<CreateMembersResponse, CreateMembersError> {
+        let request_uri = "/members";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateMembersResponse, _>()?;
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateMembersError::from_response(response))),
+                )
+            }
+        })
+    }
+
Declines invitations to become a member account.

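> Reviewer note: the master/member workflow described above takes two calls, `create_members` followed by `invite_members`. A minimal sketch against the 0.41 API generated in this diff; the `account_details`/`account_ids` field names and the optionality of `AccountDetails` fields are assumptions taken from the generated structs, and `.sync()` blocks on the returned `RusotoFuture`:

```rust
use rusoto_core::Region;
use rusoto_securityhub::{
    AccountDetails, CreateMembersRequest, InviteMembersRequest, SecurityHub, SecurityHubClient,
};

fn main() {
    let client = SecurityHubClient::new(Region::UsWest2);

    // Step 1: create the member association from the master account.
    let details = AccountDetails {
        account_id: Some("111122223333".to_owned()), // hypothetical member account
        email: Some("member@example.com".to_owned()),
        ..Default::default()
    };
    let create_req = CreateMembersRequest {
        account_details: Some(vec![details]),
        ..Default::default()
    };
    client.create_members(create_req).sync().expect("create_members failed");

    // Step 2: invite the account so its owner can accept and enable Security Hub.
    let invite_req = InviteMembersRequest {
        account_ids: Some(vec!["111122223333".to_owned()]),
        ..Default::default()
    };
    client.invite_members(invite_req).sync().expect("invite_members failed");
}
```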
+    /// <p>Declines invitations to become a member account.</p>
+    fn decline_invitations(
+        &self,
+        input: DeclineInvitationsRequest,
+    ) -> RusotoFuture<DeclineInvitationsResponse, DeclineInvitationsError> {
+        let request_uri = "/invitations/decline";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeclineInvitationsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeclineInvitationsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes a custom action target from Security Hub. Deleting a custom action target doesn't affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action.</p>
+    fn delete_action_target(
+        &self,
+        input: DeleteActionTargetRequest,
+    ) -> RusotoFuture<DeleteActionTargetResponse, DeleteActionTargetError> {
+        let request_uri = format!(
+            "/actionTargets/{action_target_arn}",
+            action_target_arn = input.action_target_arn
+        );
+
+        let mut request = SignedRequest::new("DELETE", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteActionTargetResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteActionTargetError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes the insight specified by the <code>InsightArn</code>.</p>
+    fn delete_insight(
+        &self,
+        input: DeleteInsightRequest,
+    ) -> RusotoFuture<DeleteInsightResponse, DeleteInsightError> {
+        let request_uri = format!("/insights/{insight_arn}", insight_arn = input.insight_arn);
+
+        let mut request = SignedRequest::new("DELETE", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteInsightResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteInsightError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes invitations received by the AWS account to become a member account.</p>
+    fn delete_invitations(
+        &self,
+        input: DeleteInvitationsRequest,
+    ) -> RusotoFuture<DeleteInvitationsResponse, DeleteInvitationsError> {
+        let request_uri = "/invitations/delete";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteInvitationsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteInvitationsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Deletes the specified member accounts from Security Hub.</p>
+    fn delete_members(
+        &self,
+        input: DeleteMembersRequest,
+    ) -> RusotoFuture<DeleteMembersResponse, DeleteMembersError> {
+        let request_uri = "/members/delete";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DeleteMembersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteMembersError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns a list of the custom action targets in Security Hub in your account.</p>
+    fn describe_action_targets(
+        &self,
+        input: DescribeActionTargetsRequest,
+    ) -> RusotoFuture<DescribeActionTargetsResponse, DescribeActionTargetsError> {
+        let request_uri = "/actionTargets/get";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeActionTargetsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DescribeActionTargetsError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns details about the Hub resource in your account, including the <code>HubArn</code> and the time when you enabled Security Hub.</p>
+    fn describe_hub(
+        &self,
+        input: DescribeHubRequest,
+    ) -> RusotoFuture<DescribeHubResponse, DescribeHubError> {
+        let request_uri = "/accounts";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.hub_arn {
+            params.put("HubArn", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeHubResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeHubError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns information about the products available that you can subscribe to and integrate with Security Hub to consolidate findings.</p>
+    fn describe_products(
+        &self,
+        input: DescribeProductsRequest,
+    ) -> RusotoFuture<DescribeProductsResponse, DescribeProductsError> {
+        let request_uri = "/products";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("MaxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("NextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeProductsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeProductsError::from_response(response))),
+                )
+            }
+        })
+    }
+
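> Reviewer note: `describe_products` maps `MaxResults`/`NextToken` onto query parameters, so callers page by feeding each response's token back into the next request. A hedged sketch (assumes the generated `DescribeProductsResponse` exposes `next_token: Option<String>` and derives `Debug`):

```rust
use rusoto_core::Region;
use rusoto_securityhub::{DescribeProductsRequest, SecurityHub, SecurityHubClient};

fn main() {
    let client = SecurityHubClient::new(Region::UsWest2);
    let mut next_token: Option<String> = None;

    loop {
        let request = DescribeProductsRequest {
            max_results: Some(25),
            next_token: next_token.clone(),
        };
        // `.sync()` blocks on the RusotoFuture returned by the generated method.
        let page = client
            .describe_products(request)
            .sync()
            .expect("describe_products failed");
        println!("{:?}", page);

        next_token = page.next_token;
        if next_token.is_none() {
            break; // no NextToken means this was the last page
        }
    }
}
```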
+    /// <p>Disables the integration of the specified product with Security Hub. Findings from that product are no longer sent to Security Hub after the integration is disabled.</p>
+    fn disable_import_findings_for_product(
+        &self,
+        input: DisableImportFindingsForProductRequest,
+    ) -> RusotoFuture<DisableImportFindingsForProductResponse, DisableImportFindingsForProductError>
+    {
+        let request_uri = format!(
+            "/productSubscriptions/{product_subscription_arn}",
+            product_subscription_arn = input.product_subscription_arn
+        );
+
+        let mut request = SignedRequest::new("DELETE", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DisableImportFindingsForProductResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DisableImportFindingsForProductError::from_response(
+                        response,
+                    ))
+                }))
+            }
+        })
+    }
+
+    /// <p>Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub. When you disable Security Hub for a master account, it doesn't disable Security Hub for any associated member accounts.</p> <p>When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and can't be recovered. Any standards that were enabled are disabled, and your master and member account associations are removed. If you want to save your existing findings, you must export them before you disable Security Hub.</p>
+    fn disable_security_hub(
+        &self,
+    ) -> RusotoFuture<DisableSecurityHubResponse, DisableSecurityHubError> {
+        let request_uri = "/accounts";
+
+        let mut request = SignedRequest::new("DELETE", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DisableSecurityHubResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DisableSecurityHubError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Disassociates the current Security Hub member account from the associated master account.</p>
+    fn disassociate_from_master_account(
+        &self,
+    ) -> RusotoFuture<DisassociateFromMasterAccountResponse, DisassociateFromMasterAccountError>
+    {
+        let request_uri = "/master/disassociate";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DisassociateFromMasterAccountResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(DisassociateFromMasterAccountError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// <p>Disassociates the specified member accounts from the associated master account.</p>
+    fn disassociate_members(
+        &self,
+        input: DisassociateMembersRequest,
+    ) -> RusotoFuture<DisassociateMembersResponse, DisassociateMembersError> {
+        let request_uri = "/members/disassociate";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DisassociateMembersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(DisassociateMembersError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub. When you enable a product integration, a permission policy that grants permission for the product to send findings to Security Hub is applied.</p>
+    fn enable_import_findings_for_product(
+        &self,
+        input: EnableImportFindingsForProductRequest,
+    ) -> RusotoFuture<EnableImportFindingsForProductResponse, EnableImportFindingsForProductError>
+    {
+        let request_uri = "/productSubscriptions";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<EnableImportFindingsForProductResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(EnableImportFindingsForProductError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// <p>Enables Security Hub for your account in the current Region or the Region you specify in the request. When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from AWS Config, Amazon GuardDuty, Amazon Inspector, and Amazon Macie. To learn more, see Setting Up AWS Security Hub.</p>
+    fn enable_security_hub(
+        &self,
+        input: EnableSecurityHubRequest,
+    ) -> RusotoFuture<EnableSecurityHubResponse, EnableSecurityHubError> {
+        let request_uri = "/accounts";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<EnableSecurityHubResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(EnableSecurityHubError::from_response(response))),
+                )
+            }
+        })
+    }
+
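> Reviewer note: enabling Security Hub is a single call once credentials are sourced (see AWS-CREDENTIALS.md). A minimal sketch, assuming the default credential chain and the `.sync()` helper on `RusotoFuture`:

```rust
use rusoto_core::Region;
use rusoto_securityhub::{EnableSecurityHubRequest, SecurityHub, SecurityHubClient};

fn main() {
    // Uses the default credentials provider and TLS client.
    let client = SecurityHubClient::new(Region::UsWest2);

    // The default request enables Security Hub with no tags attached.
    match client.enable_security_hub(EnableSecurityHubRequest::default()).sync() {
        Ok(_) => println!("Security Hub enabled in us-west-2"),
        Err(e) => eprintln!("enable_security_hub failed: {}", e),
    }
}
```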
+    /// <p>Returns a list of the standards that are currently enabled.</p>
+    fn get_enabled_standards(
+        &self,
+        input: GetEnabledStandardsRequest,
+    ) -> RusotoFuture<GetEnabledStandardsResponse, GetEnabledStandardsError> {
+        let request_uri = "/standards/get";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetEnabledStandardsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(GetEnabledStandardsError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns a list of findings that match the specified criteria.</p>
+    fn get_findings(
+        &self,
+        input: GetFindingsRequest,
+    ) -> RusotoFuture<GetFindingsResponse, GetFindingsError> {
+        let request_uri = "/findings";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetFindingsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetFindingsError::from_response(response))),
+                )
+            }
+        })
+    }
+
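> Reviewer note: a hedged sketch of fetching findings. It assumes the generated `GetFindingsRequest` derives `Default` (an empty request returns the first page with no filter) and that `findings` is a required `Vec` whose items carry a required `id`:

```rust
use rusoto_core::Region;
use rusoto_securityhub::{GetFindingsRequest, SecurityHub, SecurityHubClient};

fn main() {
    let client = SecurityHubClient::new(Region::UsWest2);

    // No filters: return the first page of all findings visible to this account.
    let response = client
        .get_findings(GetFindingsRequest::default())
        .sync()
        .expect("get_findings failed");

    for finding in response.findings {
        println!("{}", finding.id);
    }
}
```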
+    /// <p>Lists the results of the Security Hub insight that the insight ARN specifies.</p>
+    fn get_insight_results(
+        &self,
+        input: GetInsightResultsRequest,
+    ) -> RusotoFuture<GetInsightResultsResponse, GetInsightResultsError> {
+        let request_uri = format!(
+            "/insights/results/{insight_arn}",
+            insight_arn = input.insight_arn
+        );
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetInsightResultsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetInsightResultsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Lists and describes insights that insight ARNs specify.</p>
+    fn get_insights(
+        &self,
+        input: GetInsightsRequest,
+    ) -> RusotoFuture<GetInsightsResponse, GetInsightsError> {
+        let request_uri = "/insights/get";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetInsightsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetInsightsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation.</p>
+    fn get_invitations_count(
+        &self,
+    ) -> RusotoFuture<GetInvitationsCountResponse, GetInvitationsCountError> {
+        let request_uri = "/invitations/count";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetInvitationsCountResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(GetInvitationsCountError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Provides the details for the Security Hub master account to the current member account.</p>
+    fn get_master_account(&self) -> RusotoFuture<GetMasterAccountResponse, GetMasterAccountError> {
+        let request_uri = "/master";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetMasterAccountResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetMasterAccountError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns the details on the Security Hub member accounts that the account IDs specify.</p>
+    fn get_members(
+        &self,
+        input: GetMembersRequest,
+    ) -> RusotoFuture<GetMembersResponse, GetMembersError> {
+        let request_uri = "/members/get";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetMembersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetMembersError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from. Before you can use this action to invite a member, you must first create the member account in Security Hub by using the <code>CreateMembers</code> action. When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from member account.</p>
+    fn invite_members(
+        &self,
+        input: InviteMembersRequest,
+    ) -> RusotoFuture<InviteMembersResponse, InviteMembersError> {
+        let request_uri = "/members/invite";
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<InviteMembersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(InviteMembersError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Lists all findings-generating solutions (products) whose findings you have subscribed to receive in Security Hub.</p>
+    fn list_enabled_products_for_import(
+        &self,
+        input: ListEnabledProductsForImportRequest,
+    ) -> RusotoFuture<ListEnabledProductsForImportResponse, ListEnabledProductsForImportError> {
+        let request_uri = "/productSubscriptions";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("MaxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("NextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListEnabledProductsForImportResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(ListEnabledProductsForImportError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    /// <p>Lists all Security Hub membership invitations that were sent to the current AWS account.</p>
+    fn list_invitations(
+        &self,
+        input: ListInvitationsRequest,
+    ) -> RusotoFuture<ListInvitationsResponse, ListInvitationsError> {
+        let request_uri = "/invitations";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("MaxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("NextToken", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListInvitationsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListInvitationsError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Lists details about all member accounts for the current Security Hub master account.</p>
+    fn list_members(
+        &self,
+        input: ListMembersRequest,
+    ) -> RusotoFuture<ListMembersResponse, ListMembersError> {
+        let request_uri = "/members";
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        if let Some(ref x) = input.max_results {
+            params.put("MaxResults", x);
+        }
+        if let Some(ref x) = input.next_token {
+            params.put("NextToken", x);
+        }
+        if let Some(ref x) = input.only_associated {
+            params.put("OnlyAssociated", x);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListMembersResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListMembersError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Returns a list of tags associated with a resource.</p>
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("GET", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListTagsForResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListTagsForResourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    /// <p>Adds one or more tags to a resource.</p>
+    fn tag_resource(
+        &self,
+        input: TagResourceRequest,
+    ) -> RusotoFuture<TagResourceResponse, TagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("POST", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
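> Reviewer note: a hedged tagging sketch. The resource ARN below is a placeholder, and treating `tags` as a required `HashMap<String, String>` on the generated request struct is an assumption:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_securityhub::{SecurityHub, SecurityHubClient, TagResourceRequest};

fn main() {
    let client = SecurityHubClient::new(Region::UsWest2);

    let mut tags = HashMap::new();
    tags.insert("team".to_owned(), "security".to_owned());

    // Placeholder ARN; tagging applies to Security Hub resources such as the hub itself.
    let request = TagResourceRequest {
        resource_arn: "arn:aws:securityhub:us-west-2:123456789012:hub/default".to_owned(),
        tags,
    };
    client.tag_resource(request).sync().expect("tag_resource failed");
}
```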
+    /// <p>Removes one or more tags from a resource.</p>
+    fn untag_resource(
+        &self,
+        input: UntagResourceRequest,
+    ) -> RusotoFuture<UntagResourceResponse, UntagResourceError> {
+        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
+
+        let mut request = SignedRequest::new("DELETE", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let mut params = Params::new();
+        for item in input.tag_keys.iter() {
+            params.put("tagKeys", item);
+        }
+        request.set_params(params);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UntagResourceResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates the name and description of a custom action target in Security Hub.</p>
+    fn update_action_target(
+        &self,
+        input: UpdateActionTargetRequest,
+    ) -> RusotoFuture<UpdateActionTargetResponse, UpdateActionTargetError> {
+        let request_uri = format!(
+            "/actionTargets/{action_target_arn}",
+            action_target_arn = input.action_target_arn
+        );
+
+        let mut request = SignedRequest::new("PATCH", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateActionTargetResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateActionTargetError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    /// <p>Updates the <code>Note</code> and <code>RecordState</code> of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding.</p>
+    fn update_findings(
+        &self,
+        input: UpdateFindingsRequest,
+    ) -> RusotoFuture<UpdateFindingsResponse, UpdateFindingsError> {
+        let request_uri = "/findings";
+
+        let mut request = SignedRequest::new("PATCH", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateFindingsResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateFindingsError::from_response(response))),
+                )
+            }
+        })
+    }
+
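> Reviewer note: a hedged sketch of the Note/RecordState update described above. The `filters`, `note`, and `record_state` field names, plus the `NoteUpdate` shape, are assumptions taken from the generated structs rather than a confirmed API surface:

```rust
use rusoto_core::Region;
use rusoto_securityhub::{
    AwsSecurityFindingFilters, NoteUpdate, SecurityHub, SecurityHubClient, UpdateFindingsRequest,
};

fn main() {
    let client = SecurityHubClient::new(Region::UsWest2);

    let request = UpdateFindingsRequest {
        // An empty filter matches broadly; real callers narrow it down.
        filters: AwsSecurityFindingFilters::default(),
        note: Some(NoteUpdate {
            text: "Triaged by the security team".to_owned(),
            updated_by: "security-team".to_owned(),
        }),
        record_state: Some("ARCHIVED".to_owned()),
    };
    client.update_findings(request).sync().expect("update_findings failed");
}
```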
+    /// <p>Updates the Security Hub insight that the insight ARN specifies.</p>
+    fn update_insight(
+        &self,
+        input: UpdateInsightRequest,
+    ) -> RusotoFuture<UpdateInsightResponse, UpdateInsightError> {
+        let request_uri = format!("/insights/{insight_arn}", insight_arn = input.insight_arn);
+
+        let mut request = SignedRequest::new("PATCH", "securityhub", &self.region, &request_uri);
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+
+        let encoded = Some(serde_json::to_vec(&input).unwrap());
+        request.set_payload(encoded);
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    let result = proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateInsightResponse, _>()?;
+
+                    Ok(result)
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateInsightError::from_response(response))),
+                )
+            }
+        })
+    }
+}
diff --git a/rusoto/services/securityhub/src/lib.rs b/rusoto/services/securityhub/src/lib.rs
new file mode 100644
index 00000000000..fe42aeb8e3c
--- /dev/null
+++ b/rusoto/services/securityhub/src/lib.rs
@@ -0,0 +1,32 @@
+
+// =================================================================
+//
+//                           * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//! <p>Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the compliance status of your environment based on CIS AWS Foundations compliance checks. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the <i>AWS Security Hub User Guide</i>.</p> <p>When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to. For example, if your Region is set to <code>us-west-2</code>, when you use <code>CreateMembers</code> to add a member account to Security Hub, the association of the member account with the master account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invite was sent from.</p>
+//!
+//! If you're using the service, you're probably looking for [SecurityHubClient](struct.SecurityHubClient.html) and [SecurityHub](trait.SecurityHub.html).
+
+extern crate bytes;
+extern crate futures;
+extern crate rusoto_core;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+
+mod generated;
+mod custom;
+
+pub use crate::generated::*;
+pub use crate::custom::*;
+
diff --git a/rusoto/services/serverlessrepo/Cargo.toml b/rusoto/services/serverlessrepo/Cargo.toml
index 6fd1cb49910..a7f09dd4ea6 100644
--- a/rusoto/services/serverlessrepo/Cargo.toml
+++ b/rusoto/services/serverlessrepo/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_serverlessrepo"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/serverlessrepo/README.md b/rusoto/services/serverlessrepo/README.md
index bfd17db8810..f29d0fe4e86 100644
--- a/rusoto/services/serverlessrepo/README.md
+++ b/rusoto/services/serverlessrepo/README.md
@@ -23,9 +23,16 @@ To use `rusoto_serverlessrepo` in your application, add it as a dependency in yo
 
 ```toml
 [dependencies]
-rusoto_serverlessrepo = "0.40.0"
+rusoto_serverlessrepo = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/serverlessrepo/src/custom/mod.rs b/rusoto/services/serverlessrepo/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/serverlessrepo/src/custom/mod.rs
+++ b/rusoto/services/serverlessrepo/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/serverlessrepo/src/generated.rs b/rusoto/services/serverlessrepo/src/generated.rs
index e3ba6c7ccb3..2d8aaf922ca 100644
--- a/rusoto/services/serverlessrepo/src/generated.rs
+++ b/rusoto/services/serverlessrepo/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto;
@@ -27,7 +26,7 @@ use rusoto_core::signature::SignedRequest;
 use serde_json;
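> Reviewer note: the new `serialize_structs` feature introduced in the Cargo.toml and README hunks above turns on `derive(Serialize)` for most output structs. A hedged sketch of what that enables, assuming the feature is on (e.g. `rusoto_serverlessrepo = { version = "0.41.0", features = ["serialize_structs"] }`) and `serde_json` is a dependency:

```rust
use rusoto_core::Region;
use rusoto_serverlessrepo::{ListApplicationsRequest, ServerlessRepo, ServerlessRepoClient};

fn main() {
    let client = ServerlessRepoClient::new(Region::UsEast1);
    let response = client
        .list_applications(ListApplicationsRequest::default())
        .sync()
        .expect("list_applications failed");

    // With `serialize_structs` enabled the response struct derives Serialize,
    // so it can be re-encoded, e.g. for logging or caching.
    println!("{}", serde_json::to_string_pretty(&response).unwrap());
}
```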

 /// <p>A nested application summary.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ApplicationDependencySummary {
     /// <p>The Amazon Resource Name (ARN) of the nested application.</p>
     #[serde(rename = "ApplicationId")]
@@ -55,7 +54,7 @@ pub struct ApplicationPolicyStatement {
 /// <p>Summary of details about the application.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ApplicationSummary {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -154,7 +153,7 @@ pub struct CreateApplicationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateApplicationResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -229,7 +228,7 @@ pub struct CreateApplicationVersionRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateApplicationVersionResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -381,7 +380,7 @@ pub struct CreateCloudFormationChangeSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateCloudFormationChangeSetResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -417,7 +416,7 @@ pub struct CreateCloudFormationTemplateRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateCloudFormationTemplateResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -469,7 +468,7 @@ pub struct GetApplicationPolicyRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetApplicationPolicyResponse {
     /// <p>An array of policy statements applied to the application.</p>
     #[serde(rename = "Statements")]
@@ -489,7 +488,7 @@ pub struct GetApplicationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetApplicationResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -548,7 +547,7 @@ pub struct GetCloudFormationTemplateRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetCloudFormationTemplateResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -605,7 +604,7 @@ pub struct ListApplicationDependenciesRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListApplicationDependenciesResponse {
     /// <p>An array of application summaries nested in the application.</p>
     #[serde(rename = "Dependencies")]
@@ -633,7 +632,7 @@ pub struct ListApplicationVersionsRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListApplicationVersionsResponse {
     /// <p>The token to request the next page of results.</p>
     #[serde(rename = "NextToken")]
@@ -658,7 +657,7 @@ pub struct ListApplicationsRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListApplicationsResponse {
     /// <p>An array of application summaries.</p>
     #[serde(rename = "Applications")]
@@ -672,7 +671,7 @@ pub struct ListApplicationsResponse {
 /// <p>Parameters supported by the application.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ParameterDefinition {
     /// <p>A regular expression that represents the patterns to allow for <code>String</code> types.</p>
     #[serde(rename = "AllowedPattern")]
@@ -766,7 +765,7 @@ pub struct PutApplicationPolicyRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutApplicationPolicyResponse {
     /// <p>An array of policy statements applied to the application.</p>
     #[serde(rename = "Statements")]
@@ -852,7 +851,7 @@ pub struct UpdateApplicationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateApplicationResponse {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -902,7 +901,7 @@ pub struct UpdateApplicationResponse {
 /// <p>Application version details.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Version {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -962,7 +961,7 @@ pub struct Version {
 /// <p>An application version summary.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VersionSummary {
     /// <p>The application Amazon Resource Name (ARN).</p>
     #[serde(rename = "ApplicationId")]
@@ -1881,10 +1880,7 @@ impl ServerlessRepoClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> ServerlessRepoClient {
-        ServerlessRepoClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with<P, D>(
@@ -1898,10 +1894,14 @@ impl ServerlessRepoClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        ServerlessRepoClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> ServerlessRepoClient {
+        ServerlessRepoClient { client, region }
     }
 }
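> Reviewer note: the constructor refactor above routes `new` and `new_with` through the added `new_with_client`, which also lets callers inject or share a `rusoto_core::Client`. A minimal sketch using the signature from this hunk:

```rust
use rusoto_core::{Client, Region};
use rusoto_serverlessrepo::ServerlessRepoClient;

fn main() {
    // One shared HTTP/credentials stack, reusable across service clients.
    let shared = Client::shared();
    let repo = ServerlessRepoClient::new_with_client(shared, Region::UsEast1);
    // `repo` behaves exactly like ServerlessRepoClient::new(Region::UsEast1).
    let _ = repo;
}
```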
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -40,7 +39,7 @@ pub struct AcceptPortfolioShareInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AcceptPortfolioShareOutput {} ///

The access level to use to filter results.

@@ -67,7 +66,7 @@ pub struct AssociateBudgetWithResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateBudgetWithResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -88,7 +87,7 @@ pub struct AssociatePrincipalWithPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociatePrincipalWithPortfolioOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -110,7 +109,7 @@ pub struct AssociateProductWithPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateProductWithPortfolioOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -131,7 +130,7 @@ pub struct AssociateServiceActionWithProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateServiceActionWithProvisioningArtifactOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -145,7 +144,7 @@ pub struct AssociateTagOptionWithResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateTagOptionWithResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -160,7 +159,7 @@ pub struct BatchAssociateServiceActionWithProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchAssociateServiceActionWithProvisioningArtifactOutput { ///

An object that contains a list of errors, along with information to help you identify the self-service action.

#[serde(rename = "FailedServiceActionAssociations")] @@ -180,7 +179,7 @@ pub struct BatchDisassociateServiceActionFromProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchDisassociateServiceActionFromProvisioningArtifactOutput { ///

An object that contains a list of errors, along with information to help you identify the self-service action.

#[serde(rename = "FailedServiceActionAssociations")] @@ -190,7 +189,7 @@ pub struct BatchDisassociateServiceActionFromProvisioningArtifactOutput { ///

Information about a budget.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BudgetDetail { ///

Name of the associated budget.

#[serde(rename = "BudgetName")] @@ -200,7 +199,7 @@ pub struct BudgetDetail { ///

Information about a CloudWatch dashboard.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CloudWatchDashboard { ///

The name of the CloudWatch dashboard.

#[serde(rename = "Name")] @@ -210,7 +209,7 @@ pub struct CloudWatchDashboard { ///

Information about a constraint.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConstraintDetail { ///

The identifier of the constraint.

#[serde(rename = "ConstraintId")] @@ -232,7 +231,7 @@ pub struct ConstraintDetail { ///

Summary information about a constraint.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ConstraintSummary { ///

The description of the constraint.

#[serde(rename = "Description")] @@ -276,7 +275,7 @@ pub struct CopyProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CopyProductOutput { ///

The token to use to track the progress of the operation.

#[serde(rename = "CopyProductToken")] @@ -312,7 +311,7 @@ pub struct CreateConstraintInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateConstraintOutput { ///

Information about the constraint.

#[serde(rename = "ConstraintDetail")] @@ -354,7 +353,7 @@ pub struct CreatePortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePortfolioOutput { ///

Information about the portfolio.

#[serde(rename = "PortfolioDetail")] @@ -386,7 +385,7 @@ pub struct CreatePortfolioShareInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePortfolioShareOutput { ///

The portfolio share unique identifier. This will only be returned if portfolio is shared to an organization node.

#[serde(rename = "PortfolioShareToken")] @@ -442,7 +441,7 @@ pub struct CreateProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProductOutput { ///

Information about the product view.

#[serde(rename = "ProductViewDetail")] @@ -494,14 +493,14 @@ pub struct CreateProvisionedProductPlanInput { #[serde(rename = "ProvisioningParameters")] #[serde(skip_serializing_if = "Option::is_none")] pub provisioning_parameters: Option>, - ///

One or more tags.

+ ///

One or more tags.

If the plan is for an existing provisioned product, the product must have a RESOURCE_UPDATE constraint with TagUpdatesOnProvisionedProduct set to ALLOWED to allow tag updates.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProvisionedProductPlanOutput { ///

The plan identifier.

#[serde(rename = "PlanId")] @@ -543,7 +542,7 @@ pub struct CreateProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProvisioningArtifactOutput { ///

The URL of the CloudFormation template in Amazon S3, in JSON format.

#[serde(rename = "Info")] @@ -584,7 +583,7 @@ pub struct CreateServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateServiceActionOutput { ///

An object containing information about the self-service action.

#[serde(rename = "ServiceActionDetail")] @@ -603,7 +602,7 @@ pub struct CreateTagOptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTagOptionOutput { ///

Information about the TagOption.

#[serde(rename = "TagOptionDetail")] @@ -623,7 +622,7 @@ pub struct DeleteConstraintInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteConstraintOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -638,7 +637,7 @@ pub struct DeletePortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePortfolioOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -661,7 +660,7 @@ pub struct DeletePortfolioShareInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePortfolioShareOutput { ///

The portfolio share unique identifier. This will only be returned if delete is made to an organization node.

#[serde(rename = "PortfolioShareToken")] @@ -681,7 +680,7 @@ pub struct DeleteProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProductOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -700,7 +699,7 @@ pub struct DeleteProvisionedProductPlanInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProvisionedProductPlanOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -718,7 +717,7 @@ pub struct DeleteProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProvisioningArtifactOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -733,7 +732,7 @@ pub struct DeleteServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteServiceActionOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -744,7 +743,7 @@ pub struct DeleteTagOptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTagOptionOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -759,7 +758,7 @@ pub struct DescribeConstraintInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeConstraintOutput { ///

Information about the constraint.

#[serde(rename = "ConstraintDetail")] @@ -787,7 +786,7 @@ pub struct DescribeCopyProductStatusInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCopyProductStatusOutput { ///

The status of the copy product operation.

#[serde(rename = "CopyProductStatus")] @@ -815,7 +814,7 @@ pub struct DescribePortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePortfolioOutput { ///

Information about the associated budgets.

#[serde(rename = "Budgets")] @@ -843,7 +842,7 @@ pub struct DescribePortfolioShareStatusInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePortfolioShareStatusOutput { ///

Organization node identifier. It can be either account id, organizational unit id or organization id.

#[serde(rename = "OrganizationNodeValue")] @@ -879,7 +878,7 @@ pub struct DescribeProductAsAdminInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProductAsAdminOutput { ///

Information about the associated budgets.

#[serde(rename = "Budgets")] @@ -915,7 +914,7 @@ pub struct DescribeProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProductOutput { ///

Information about the associated budgets.

#[serde(rename = "Budgets")] @@ -943,7 +942,7 @@ pub struct DescribeProductViewInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProductViewOutput { ///

Summary information about the product.

#[serde(rename = "ProductViewSummary")] @@ -967,7 +966,7 @@ pub struct DescribeProvisionedProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProvisionedProductOutput { ///

Any CloudWatch dashboards that were created when provisioning the product.

#[serde(rename = "CloudWatchDashboards")] @@ -999,7 +998,7 @@ pub struct DescribeProvisionedProductPlanInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProvisionedProductPlanOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1034,7 +1033,7 @@ pub struct DescribeProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProvisioningArtifactOutput { ///

The URL of the CloudFormation template in Amazon S3.

#[serde(rename = "Info")] @@ -1069,7 +1068,7 @@ pub struct DescribeProvisioningParametersInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProvisioningParametersOutput { ///

Information about the constraints used to provision the product.

#[serde(rename = "ConstraintSummaries")] @@ -1113,7 +1112,7 @@ pub struct DescribeRecordInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRecordOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1129,6 +1128,25 @@ pub struct DescribeRecordOutput { pub record_outputs: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeServiceActionExecutionParametersInput { + #[serde(rename = "AcceptLanguage")] + #[serde(skip_serializing_if = "Option::is_none")] + pub accept_language: Option, + #[serde(rename = "ProvisionedProductId")] + pub provisioned_product_id: String, + #[serde(rename = "ServiceActionId")] + pub service_action_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeServiceActionExecutionParametersOutput { + #[serde(rename = "ServiceActionParameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub service_action_parameters: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeServiceActionInput { ///

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

@@ -1141,7 +1159,7 @@ pub struct DescribeServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeServiceActionOutput { ///

Detailed information about the self-service action.

#[serde(rename = "ServiceActionDetail")] @@ -1157,7 +1175,7 @@ pub struct DescribeTagOptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagOptionOutput { ///

Information about the TagOption.

#[serde(rename = "TagOptionDetail")] @@ -1169,7 +1187,7 @@ pub struct DescribeTagOptionOutput { pub struct DisableAWSOrganizationsAccessInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableAWSOrganizationsAccessOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1183,7 +1201,7 @@ pub struct DisassociateBudgetFromResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateBudgetFromResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1201,7 +1219,7 @@ pub struct DisassociatePrincipalFromPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociatePrincipalFromPortfolioOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1219,7 +1237,7 @@ pub struct DisassociateProductFromPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateProductFromPortfolioOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1240,7 +1258,7 @@ pub struct DisassociateServiceActionFromProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateServiceActionFromProvisioningArtifactOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1254,14 +1272,14 @@ pub struct DisassociateTagOptionFromResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateTagOptionFromResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct EnableAWSOrganizationsAccessInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EnableAWSOrganizationsAccessOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1279,7 +1297,7 @@ pub struct ExecuteProvisionedProductPlanInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecuteProvisionedProductPlanOutput { ///

Information about the result of provisioning the product.

#[serde(rename = "RecordDetail")] @@ -1296,6 +1314,9 @@ pub struct ExecuteProvisionedProductServiceActionInput { ///

An idempotency token that uniquely identifies the execute request.

#[serde(rename = "ExecuteToken")] pub execute_token: String, + #[serde(rename = "Parameters")] + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option<::std::collections::HashMap>>, ///

The identifier of the provisioned product.

#[serde(rename = "ProvisionedProductId")] pub provisioned_product_id: String, @@ -1305,7 +1326,7 @@ pub struct ExecuteProvisionedProductServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecuteProvisionedProductServiceActionOutput { ///

An object containing detailed information about the result of provisioning the product.

#[serde(rename = "RecordDetail")] @@ -1313,9 +1334,23 @@ pub struct ExecuteProvisionedProductServiceActionOutput { pub record_detail: Option, } +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ExecutionParameter { + #[serde(rename = "DefaultValues")] + #[serde(skip_serializing_if = "Option::is_none")] + pub default_values: Option>, + #[serde(rename = "Name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(rename = "Type")] + #[serde(skip_serializing_if = "Option::is_none")] + pub type_: Option, +} + ///

An object containing information about the error, along with identifying information about the self-service action and its associations.
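Taken together, the hunks above add `DescribeServiceActionExecutionParametersInput`/`Output`, the `ExecutionParameter` shape, and a `Parameters` map on `ExecuteProvisionedProductServiceActionInput`. A hedged sketch of how the two calls might be combined; all ids and the `InstanceId` parameter name are hypothetical:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_servicecatalog::{
    DescribeServiceActionExecutionParametersInput, ExecuteProvisionedProductServiceActionInput,
    ServiceCatalog, ServiceCatalogClient,
};

fn execute_with_parameters() {
    let client = ServiceCatalogClient::new(Region::UsEast1);

    // Ask the new API which parameters the self-service action accepts.
    let described = client
        .describe_service_action_execution_parameters(
            DescribeServiceActionExecutionParametersInput {
                provisioned_product_id: "pp-hypothetical".to_owned(), // hypothetical id
                service_action_id: "act-hypothetical".to_owned(),     // hypothetical id
                ..Default::default()
            },
        )
        .sync()
        .expect("describe failed");
    for p in described.service_action_parameters.unwrap_or_default() {
        println!("parameter: {:?} ({:?})", p.name, p.type_);
    }

    // Supply a value for one of them through the new `parameters` map.
    let mut parameters: HashMap<String, Vec<String>> = HashMap::new();
    parameters.insert("InstanceId".to_owned(), vec!["i-hypothetical".to_owned()]);

    let _record = client
        .execute_provisioned_product_service_action(ExecuteProvisionedProductServiceActionInput {
            execute_token: "idempotency-token-hypothetical".to_owned(),
            parameters: Some(parameters),
            provisioned_product_id: "pp-hypothetical".to_owned(),
            service_action_id: "act-hypothetical".to_owned(),
            ..Default::default()
        })
        .sync()
        .expect("execute failed");
}
```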

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedServiceActionAssociation { ///

The error code. Valid values are listed below.

#[serde(rename = "ErrorCode")] @@ -1343,7 +1378,7 @@ pub struct FailedServiceActionAssociation { pub struct GetAWSOrganizationsAccessStatusInput {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAWSOrganizationsAccessStatusOutput { ///

The status of the portfolio share feature.

#[serde(rename = "AccessStatus")] @@ -1353,7 +1388,7 @@ pub struct GetAWSOrganizationsAccessStatusOutput { ///

Summary information about a product path for a user.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LaunchPathSummary { ///

The constraints on the portfolio-product relationship.

#[serde(rename = "ConstraintSummaries")] @@ -1394,7 +1429,7 @@ pub struct ListAcceptedPortfolioSharesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAcceptedPortfolioSharesOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1426,7 +1461,7 @@ pub struct ListBudgetsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListBudgetsForResourceOutput { ///

Information about the associated budgets.

#[serde(rename = "Budgets")] @@ -1462,7 +1497,7 @@ pub struct ListConstraintsForPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListConstraintsForPortfolioOutput { ///

Information about the constraints.

#[serde(rename = "ConstraintDetails")] @@ -1494,7 +1529,7 @@ pub struct ListLaunchPathsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLaunchPathsOutput { ///

Information about the launch path.

#[serde(rename = "LaunchPathSummaries")] @@ -1529,7 +1564,7 @@ pub struct ListOrganizationPortfolioAccessInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOrganizationPortfolioAccessOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1553,7 +1588,7 @@ pub struct ListPortfolioAccessInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPortfolioAccessOutput { ///

Information about the AWS accounts with access to the portfolio.

#[serde(rename = "AccountIds")] @@ -1585,7 +1620,7 @@ pub struct ListPortfoliosForProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPortfoliosForProductOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1614,7 +1649,7 @@ pub struct ListPortfoliosInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPortfoliosOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1646,7 +1681,7 @@ pub struct ListPrincipalsForPortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListPrincipalsForPortfolioOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1683,7 +1718,7 @@ pub struct ListProvisionedProductPlansInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProvisionedProductPlansOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1715,7 +1750,7 @@ pub struct ListProvisioningArtifactsForServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProvisioningArtifactsForServiceActionOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1739,7 +1774,7 @@ pub struct ListProvisioningArtifactsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProvisioningArtifactsOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1776,7 +1811,7 @@ pub struct ListRecordHistoryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRecordHistoryOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1821,7 +1856,7 @@ pub struct ListResourcesForTagOptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourcesForTagOptionOutput { ///

The page token for the next set of results. To retrieve the first set of results, use null.

#[serde(rename = "PageToken")] @@ -1856,7 +1891,7 @@ pub struct ListServiceActionsForProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListServiceActionsForProvisioningArtifactOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1885,7 +1920,7 @@ pub struct ListServiceActionsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListServiceActionsOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1917,7 +1952,7 @@ pub struct ListStackInstancesForProvisionedProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStackInstancesForProvisionedProductOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -1963,7 +1998,7 @@ pub struct ListTagOptionsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagOptionsOutput { ///

The page token for the next set of results. To retrieve the first set of results, use null.

#[serde(rename = "PageToken")] @@ -1990,7 +2025,7 @@ pub struct OrganizationNode { ///

The constraints that the administrator has put on the parameter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ParameterConstraints { ///

The values that the administrator has allowed for the parameter.

#[serde(rename = "AllowedValues")] @@ -2000,7 +2035,7 @@ pub struct ParameterConstraints { ///

Information about a portfolio.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PortfolioDetail { ///

The ARN assigned to the portfolio.

#[serde(rename = "ARN")] @@ -2030,7 +2065,7 @@ pub struct PortfolioDetail { ///

Information about a principal.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Principal { ///

The ARN of the principal (IAM user, role, or group).

#[serde(rename = "PrincipalARN")] @@ -2044,7 +2079,7 @@ pub struct Principal { ///

A single product view aggregation value/count pair, containing metadata about each product to which the calling user has access.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProductViewAggregationValue { ///

An approximate count of the products that match the value.

#[serde(rename = "ApproximateCount")] @@ -2058,7 +2093,7 @@ pub struct ProductViewAggregationValue { ///

Information about a product view.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProductViewDetail { ///

The UTC time stamp of the creation time.

#[serde(rename = "CreatedTime")] @@ -2080,7 +2115,7 @@ pub struct ProductViewDetail { ///

Summary information about a product view.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProductViewSummary { ///

The distributor of the product. Contact the product administrator for the significance of this value.

#[serde(rename = "Distributor")] @@ -2169,7 +2204,7 @@ pub struct ProvisionProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisionProductOutput { ///

Information about the result of provisioning the product.

#[serde(rename = "RecordDetail")] @@ -2179,7 +2214,7 @@ pub struct ProvisionProductOutput { ///

Information about a provisioned product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisionedProductAttribute { ///

The ARN of the provisioned product.

#[serde(rename = "Arn")] @@ -2245,7 +2280,7 @@ pub struct ProvisionedProductAttribute { ///

Information about a provisioned product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisionedProductDetail { ///

The ARN of the provisioned product.

#[serde(rename = "Arn")] @@ -2295,7 +2330,7 @@ pub struct ProvisionedProductDetail { ///

Information about a plan.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisionedProductPlanDetails { ///

The UTC time stamp of the creation time.

#[serde(rename = "CreatedTime")] @@ -2361,7 +2396,7 @@ pub struct ProvisionedProductPlanDetails { ///

Summary information about a plan.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisionedProductPlanSummary { ///

The plan identifier.

#[serde(rename = "PlanId")] @@ -2391,7 +2426,7 @@ pub struct ProvisionedProductPlanSummary { ///

Information about a provisioning artifact. A provisioning artifact is also known as a product version.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifact { ///

The UTC time stamp of the creation time.

#[serde(rename = "CreatedTime")] @@ -2401,6 +2436,10 @@ pub struct ProvisioningArtifact { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, + ///

Information set by the administrator to provide guidance to end users about which provisioning artifacts to use.

+ #[serde(rename = "Guidance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub guidance: Option, ///

The identifier of the provisioning artifact.

#[serde(rename = "Id")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2413,7 +2452,7 @@ pub struct ProvisioningArtifact { ///

Information about a provisioning artifact (also known as a version) for a product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifactDetail { ///

Indicates whether the product version is active.

#[serde(rename = "Active")] @@ -2427,6 +2466,10 @@ pub struct ProvisioningArtifactDetail { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, + ///

Information set by the administrator to provide guidance to end users about which provisioning artifacts to use.

+ #[serde(rename = "Guidance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub guidance: Option, ///

The identifier of the provisioning artifact.

#[serde(rename = "Id")] #[serde(skip_serializing_if = "Option::is_none")] @@ -2443,7 +2486,7 @@ pub struct ProvisioningArtifactDetail { ///

Information about a parameter used to provision a product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifactParameter { ///

The default value.

#[serde(rename = "DefaultValue")] @@ -2473,7 +2516,7 @@ pub struct ProvisioningArtifactParameter { ///

The user-defined preferences that will be applied during product provisioning, unless overridden by ProvisioningPreferences or UpdateProvisioningPreferences.

For more information on maximum concurrent accounts and failure tolerance, see Stack set operation options in the AWS CloudFormation User Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifactPreferences { ///

One or more AWS accounts where stack instances are deployed from the stack set. These accounts can be scoped in ProvisioningPreferences$StackSetAccounts and UpdateProvisioningPreferences$StackSetAccounts.

Applicable only to a CFN_STACKSET provisioned product type.

#[serde(rename = "StackSetAccounts")] @@ -2511,7 +2554,7 @@ pub struct ProvisioningArtifactProperties { ///

Summary information about a provisioning artifact (also known as a version) for a product.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifactSummary { ///

The UTC time stamp of the creation time.

#[serde(rename = "CreatedTime")] @@ -2537,7 +2580,7 @@ pub struct ProvisioningArtifactSummary { ///

An object that contains summary information about a product view and a provisioning artifact.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProvisioningArtifactView { ///

Summary information about a product view.

#[serde(rename = "ProductViewSummary")] @@ -2593,7 +2636,7 @@ pub struct ProvisioningPreferences { ///

Information about a request operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecordDetail { ///

The UTC time stamp of the creation time.

#[serde(rename = "CreatedTime")] @@ -2651,7 +2694,7 @@ pub struct RecordDetail { ///

The error code and description resulting from an operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecordError { ///

The numeric value of the error.

#[serde(rename = "Code")] @@ -2665,7 +2708,7 @@ pub struct RecordError { ///

The output for the product created as the result of a request. For example, the output for a CloudFormation-backed product that creates an S3 bucket would include the S3 bucket URL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecordOutput { ///

The description of the output.

#[serde(rename = "Description")] @@ -2683,7 +2726,7 @@ pub struct RecordOutput { ///

Information about a tag, which is a key-value pair.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RecordTag { ///

The key for this tag.

#[serde(rename = "Key")] @@ -2711,12 +2754,12 @@ pub struct RejectPortfolioShareInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RejectPortfolioShareOutput {} ///

Information about a resource change that will occur when a plan is executed.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceChange { ///

The change action.

#[serde(rename = "Action")] @@ -2750,7 +2793,7 @@ pub struct ResourceChange { ///

Information about a change to a resource attribute.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceChangeDetail { ///

The ID of the entity that caused the change.

#[serde(rename = "CausingEntity")] @@ -2768,7 +2811,7 @@ pub struct ResourceChangeDetail { ///

Information about a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceDetail { ///

The ARN of the resource.

#[serde(rename = "ARN")] @@ -2794,7 +2837,7 @@ pub struct ResourceDetail { ///

Information about a change to a resource attribute.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceTargetDefinition { ///

The attribute to be changed.

#[serde(rename = "Attribute")] @@ -2831,7 +2874,7 @@ pub struct ScanProvisionedProductsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScanProvisionedProductsOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -2880,7 +2923,7 @@ pub struct SearchProductsAsAdminInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchProductsAsAdminOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -2921,7 +2964,7 @@ pub struct SearchProductsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchProductsOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -2971,7 +3014,7 @@ pub struct SearchProvisionedProductsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SearchProvisionedProductsOutput { ///

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

#[serde(rename = "NextPageToken")] @@ -3003,7 +3046,7 @@ pub struct ServiceActionAssociation { ///

An object containing detailed information about the self-service action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceActionDetail { ///

A map that defines the self-service action.

#[serde(rename = "Definition")] @@ -3017,7 +3060,7 @@ pub struct ServiceActionDetail { ///

Detailed information about the self-service action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceActionSummary { ///

The self-service action definition type. For example, SSM_AUTOMATION.

#[serde(rename = "DefinitionType")] @@ -3039,7 +3082,7 @@ pub struct ServiceActionSummary { ///

Information about the portfolio share operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ShareDetails { ///

List of errors.

#[serde(rename = "ShareErrors")] @@ -3053,7 +3096,7 @@ pub struct ShareDetails { ///

Errors that occurred during the portfolio share operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ShareError { ///

List of accounts impacted by the error.

#[serde(rename = "Accounts")] @@ -3071,7 +3114,7 @@ pub struct ShareError { ///

An AWS CloudFormation stack, in a specific account and region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StackInstance { ///

The name of the AWS account that the stack instance is associated with.

#[serde(rename = "Account")] @@ -3100,7 +3143,7 @@ pub struct Tag { ///

Information about a TagOption.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagOptionDetail { ///

The TagOption active state.

#[serde(rename = "Active")] @@ -3122,7 +3165,7 @@ pub struct TagOptionDetail { ///

Summary information about a TagOption.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagOptionSummary { ///

The TagOption key.

#[serde(rename = "Key")] @@ -3158,7 +3201,7 @@ pub struct TerminateProvisionedProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminateProvisionedProductOutput { ///

Information about the result of this request.

#[serde(rename = "RecordDetail")] @@ -3186,7 +3229,7 @@ pub struct UpdateConstraintInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateConstraintOutput { ///

Information about the constraint.

#[serde(rename = "ConstraintDetail")] @@ -3234,7 +3277,7 @@ pub struct UpdatePortfolioInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePortfolioOutput { ///

Information about the portfolio.

#[serde(rename = "PortfolioDetail")] @@ -3294,7 +3337,7 @@ pub struct UpdateProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProductOutput { ///

Information about the product view.

#[serde(rename = "ProductViewDetail")] @@ -3350,7 +3393,7 @@ pub struct UpdateProvisionedProductInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProvisionedProductOutput { ///

Information about the result of the request.

#[serde(rename = "RecordDetail")] @@ -3376,7 +3419,7 @@ pub struct UpdateProvisionedProductPropertiesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProvisionedProductPropertiesOutput { ///

The provisioned product identifier.

#[serde(rename = "ProvisionedProductId")] @@ -3410,6 +3453,10 @@ pub struct UpdateProvisioningArtifactInput { #[serde(rename = "Description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, + ///

Information set by the administrator to provide guidance to end users about which provisioning artifacts to use.

The DEFAULT value indicates that the product version is active.

The administrator can set the guidance to DEPRECATED to inform users that the product version is deprecated. Users are able to make updates to a provisioned product of a deprecated version but cannot launch new provisioned products using a deprecated version.

+ #[serde(rename = "Guidance")] + #[serde(skip_serializing_if = "Option::is_none")] + pub guidance: Option, ///

The updated name of the provisioning artifact.
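A hedged sketch using the new `Guidance` field on `UpdateProvisioningArtifactInput` to deprecate a product version, per the doc text above; the ids are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_servicecatalog::{
    ServiceCatalog, ServiceCatalogClient, UpdateProvisioningArtifactInput,
};

// Mark a version as DEPRECATED: existing provisioned products can still be
// updated, but new ones cannot be launched from this version.
fn deprecate_version(product_id: &str, artifact_id: &str) {
    let client = ServiceCatalogClient::new(Region::UsEast1);
    let _ = client
        .update_provisioning_artifact(UpdateProvisioningArtifactInput {
            product_id: product_id.to_owned(),
            provisioning_artifact_id: artifact_id.to_owned(),
            guidance: Some("DEPRECATED".to_owned()),
            ..Default::default()
        })
        .sync();
}
```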

#[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] @@ -3423,7 +3470,7 @@ pub struct UpdateProvisioningArtifactInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateProvisioningArtifactOutput { ///

The URL of the CloudFormation template in Amazon S3.

#[serde(rename = "Info")] @@ -3513,7 +3560,7 @@ pub struct UpdateServiceActionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServiceActionOutput { ///

Detailed information about the self-service action.

#[serde(rename = "ServiceActionDetail")] @@ -3537,7 +3584,7 @@ pub struct UpdateTagOptionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateTagOptionOutput { ///

Information about the TagOption.

#[serde(rename = "TagOptionDetail")] @@ -3547,7 +3594,7 @@ pub struct UpdateTagOptionOutput { ///

Additional information provided by the administrator.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UsageInstruction { ///

The usage instruction type for the value.

#[serde(rename = "Type")] @@ -4135,6 +4182,8 @@ impl Error for CreatePortfolioError { pub enum CreatePortfolioShareError { ///

One or more parameters provided to the operation are not valid.

InvalidParameters(String), + ///

An attempt was made to modify a resource that is in a state that is not valid. Check your resources to ensure that they are in valid states before retrying the operation.
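Since the doc text suggests retrying after an `InvalidStateException`, here is a small sketch of matching on the new variant (a sketch only, not a full retry policy):

```rust
use rusoto_core::RusotoError;
use rusoto_servicecatalog::CreatePortfolioShareError;

// The share may transiently be in a state that rejects modification, so
// callers can treat this variant as retryable.
fn is_retryable(err: &RusotoError<CreatePortfolioShareError>) -> bool {
    match err {
        RusotoError::Service(CreatePortfolioShareError::InvalidState(_)) => true,
        _ => false,
    }
}
```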

+ InvalidState(String), ///

The current limits of the service would have been exceeded by this operation. Decrease your resource use or increase your service limits and retry the operation.

LimitExceeded(String), ///

The operation is not supported.

@@ -4152,6 +4201,9 @@ impl CreatePortfolioShareError { err.msg, )) } + "InvalidStateException" => { + return RusotoError::Service(CreatePortfolioShareError::InvalidState(err.msg)) + } "LimitExceededException" => { return RusotoError::Service(CreatePortfolioShareError::LimitExceeded(err.msg)) } @@ -4181,6 +4233,7 @@ impl Error for CreatePortfolioShareError { fn description(&self) -> &str { match *self { CreatePortfolioShareError::InvalidParameters(ref cause) => cause, + CreatePortfolioShareError::InvalidState(ref cause) => cause, CreatePortfolioShareError::LimitExceeded(ref cause) => cause, CreatePortfolioShareError::OperationNotSupported(ref cause) => cause, CreatePortfolioShareError::ResourceNotFound(ref cause) => cause, @@ -4523,6 +4576,8 @@ impl Error for DeletePortfolioError { pub enum DeletePortfolioShareError { ///

One or more parameters provided to the operation are not valid.

InvalidParameters(String), + ///

An attempt was made to modify a resource that is in a state that is not valid. Check your resources to ensure that they are in valid states before retrying the operation.

+ InvalidState(String), ///

The operation is not supported.

OperationNotSupported(String), ///

The specified resource was not found.

@@ -4538,6 +4593,9 @@ impl DeletePortfolioShareError { err.msg, )) } + "InvalidStateException" => { + return RusotoError::Service(DeletePortfolioShareError::InvalidState(err.msg)) + } "OperationNotSupportedException" => { return RusotoError::Service(DeletePortfolioShareError::OperationNotSupported( err.msg, @@ -4564,6 +4622,7 @@ impl Error for DeletePortfolioShareError { fn description(&self) -> &str { match *self { DeletePortfolioShareError::InvalidParameters(ref cause) => cause, + DeletePortfolioShareError::InvalidState(ref cause) => cause, DeletePortfolioShareError::OperationNotSupported(ref cause) => cause, DeletePortfolioShareError::ResourceNotFound(ref cause) => cause, } @@ -5309,6 +5368,51 @@ impl Error for DescribeServiceActionError { } } } +/// Errors returned by DescribeServiceActionExecutionParameters +#[derive(Debug, PartialEq)] +pub enum DescribeServiceActionExecutionParametersError { + ///

One or more parameters provided to the operation are not valid.

+ InvalidParameters(String), + ///

The specified resource was not found.

+ ResourceNotFound(String), +} + +impl DescribeServiceActionExecutionParametersError { + pub fn from_response( + res: BufferedHttpResponse, + ) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "InvalidParametersException" => { + return RusotoError::Service( + DescribeServiceActionExecutionParametersError::InvalidParameters(err.msg), + ) + } + "ResourceNotFoundException" => { + return RusotoError::Service( + DescribeServiceActionExecutionParametersError::ResourceNotFound(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for DescribeServiceActionExecutionParametersError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for DescribeServiceActionExecutionParametersError { + fn description(&self) -> &str { + match *self { + DescribeServiceActionExecutionParametersError::InvalidParameters(ref cause) => cause, + DescribeServiceActionExecutionParametersError::ResourceNotFound(ref cause) => cause, + } + } +} /// Errors returned by DescribeTagOption #[derive(Debug, PartialEq)] pub enum DescribeTagOptionError { @@ -7466,6 +7570,14 @@ pub trait ServiceCatalog { input: DescribeServiceActionInput, ) -> RusotoFuture; + fn describe_service_action_execution_parameters( + &self, + input: DescribeServiceActionExecutionParametersInput, + ) -> RusotoFuture< + DescribeServiceActionExecutionParametersOutput, + DescribeServiceActionExecutionParametersError, + >; + ///

Gets information about the specified TagOption.

fn describe_tag_option( &self, @@ -7760,10 +7872,7 @@ impl ServiceCatalogClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ServiceCatalogClient { - ServiceCatalogClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7777,10 +7886,14 @@ impl ServiceCatalogClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ServiceCatalogClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ServiceCatalogClient { + ServiceCatalogClient { client, region } } } @@ -8942,6 +9055,37 @@ impl ServiceCatalog for ServiceCatalogClient { }) } + fn describe_service_action_execution_parameters( + &self, + input: DescribeServiceActionExecutionParametersInput, + ) -> RusotoFuture< + DescribeServiceActionExecutionParametersOutput, + DescribeServiceActionExecutionParametersError, + > { + let mut request = SignedRequest::new("POST", "servicecatalog", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWS242ServiceCatalogService.DescribeServiceActionExecutionParameters", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new(response.buffer().from_err().and_then(|response| { + Err(DescribeServiceActionExecutionParametersError::from_response(response)) + })) + } + }) + } + ///

Gets information about the specified TagOption.
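The constructor refactor above introduces `new_with_client`, with `new` and `new_with` now delegating to it. A minimal sketch of what this enables, assuming `rusoto_core::Client` is cheaply cloneable:

```rust
use rusoto_core::{Client, Region};
use rusoto_servicecatalog::ServiceCatalogClient;

// One underlying HTTP client / credentials stack shared across regions.
fn regional_clients() -> Vec<ServiceCatalogClient> {
    let shared = Client::shared();
    vec![
        ServiceCatalogClient::new_with_client(shared.clone(), Region::UsEast1),
        ServiceCatalogClient::new_with_client(shared, Region::EuWest1),
    ]
}
```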

fn describe_tag_option( &self, diff --git a/rusoto/services/servicediscovery/Cargo.toml b/rusoto/services/servicediscovery/Cargo.toml index 4f2ee3cc701..55ed4cae6c9 100644 --- a/rusoto/services/servicediscovery/Cargo.toml +++ b/rusoto/services/servicediscovery/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_servicediscovery" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/servicediscovery/README.md b/rusoto/services/servicediscovery/README.md index c409200539e..11763a844d1 100644 --- a/rusoto/services/servicediscovery/README.md +++ b/rusoto/services/servicediscovery/README.md @@ -23,9 +23,16 @@ To use `rusoto_servicediscovery` in your application, add it as a dependency in ```toml [dependencies] -rusoto_servicediscovery = "0.40.0" +rusoto_servicediscovery = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/servicediscovery/src/custom/mod.rs b/rusoto/services/servicediscovery/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/servicediscovery/src/custom/mod.rs +++ b/rusoto/services/servicediscovery/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/servicediscovery/src/generated.rs b/rusoto/services/servicediscovery/src/generated.rs index dfd83e54697..15ef71e8583 100644 --- a/rusoto/services/servicediscovery/src/generated.rs +++ b/rusoto/services/servicediscovery/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -40,7 +39,7 @@ pub struct CreateHttpNamespaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateHttpNamespaceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

#[serde(rename = "OperationId")] @@ -67,7 +66,7 @@ pub struct CreatePrivateDnsNamespaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePrivateDnsNamespaceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

#[serde(rename = "OperationId")] @@ -91,7 +90,7 @@ pub struct CreatePublicDnsNamespaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePublicDnsNamespaceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

#[serde(rename = "OperationId")] @@ -131,7 +130,7 @@ pub struct CreateServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateServiceResponse { ///

A complex type that contains information about the new service.

#[serde(rename = "Service")] @@ -147,7 +146,7 @@ pub struct DeleteNamespaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteNamespaceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

#[serde(rename = "OperationId")] @@ -163,7 +162,7 @@ pub struct DeleteServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteServiceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -177,7 +176,7 @@ pub struct DeregisterInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterInstanceResponse { ///

A value that you can use to determine whether the request completed successfully. For more information, see GetOperation.

#[serde(rename = "OperationId")] @@ -208,7 +207,7 @@ pub struct DiscoverInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DiscoverInstancesResponse { ///

A complex type that contains one HttpInstanceSummary for each registered instance.
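A hedged sketch of calling `discover_instances` and reading the returned `HttpInstanceSummary` values; the namespace and service names are hypothetical:

```rust
use rusoto_core::Region;
use rusoto_servicediscovery::{
    DiscoverInstancesRequest, ServiceDiscovery, ServiceDiscoveryClient,
};

// DNS-less discovery via the HTTP API.
fn discover() {
    let client = ServiceDiscoveryClient::new(Region::UsEast1);
    let resp = client
        .discover_instances(DiscoverInstancesRequest {
            namespace_name: "example.local".to_owned(), // hypothetical namespace
            service_name: "web".to_owned(),             // hypothetical service
            ..Default::default()
        })
        .sync()
        .expect("discovery failed");
    for instance in resp.instances.unwrap_or_default() {
        println!("{:?}", instance.instance_id);
    }
}
```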

#[serde(rename = "Instances")] @@ -238,7 +237,7 @@ pub struct DnsConfigChange { ///

A complex type that contains the ID for the Route 53 hosted zone that AWS Cloud Map creates when you create a namespace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DnsProperties { ///

The ID for the Route 53 hosted zone that AWS Cloud Map creates when you create a namespace.

#[serde(rename = "HostedZoneId")] @@ -268,7 +267,7 @@ pub struct GetInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstanceResponse { ///

A complex type that contains information about a specified instance.

#[serde(rename = "Instance")] @@ -296,7 +295,7 @@ pub struct GetInstancesHealthStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInstancesHealthStatusResponse { ///

If more than MaxResults instances match the specified criteria, you can submit another GetInstancesHealthStatus request to get the next group of results. Specify the value of NextToken from the previous response in the next request.
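A minimal sketch of the `NextToken` loop the doc text describes, assuming the request struct's fields follow the usual generated snake_case names:

```rust
use rusoto_core::Region;
use rusoto_servicediscovery::{
    GetInstancesHealthStatusRequest, ServiceDiscovery, ServiceDiscoveryClient,
};

// Page through all health statuses for a service, feeding each response's
// NextToken back into the following request until it comes back as None.
fn collect_health(service_id: &str) {
    let client = ServiceDiscoveryClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let resp = client
            .get_instances_health_status(GetInstancesHealthStatusRequest {
                service_id: service_id.to_owned(),
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()
            .expect("request failed");
        for (instance_id, status) in resp.status.unwrap_or_default() {
            println!("{}: {}", instance_id, status);
        }
        next_token = resp.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```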

#[serde(rename = "NextToken")] @@ -316,7 +315,7 @@ pub struct GetNamespaceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetNamespaceResponse { ///

A complex type that contains information about the specified namespace.

#[serde(rename = "Namespace")] @@ -332,7 +331,7 @@ pub struct GetOperationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOperationResponse { ///

A complex type that contains information about the operation.

#[serde(rename = "Operation")] @@ -348,7 +347,7 @@ pub struct GetServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetServiceResponse { ///

A complex type that contains information about the service.

#[serde(rename = "Service")] @@ -383,7 +382,7 @@ pub struct HealthCheckCustomConfig { ///

In a response to a DiscoverInstances request, HttpInstanceSummary contains information about one instance that matches the values that you specified in the request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HttpInstanceSummary { ///

If you included any attributes when you registered the instance, the values of those attributes.

#[serde(rename = "Attributes")] @@ -409,7 +408,7 @@ pub struct HttpInstanceSummary { ///

A complex type that contains the name of an HTTP namespace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HttpProperties { ///

The name of an HTTP namespace.

#[serde(rename = "HttpName")] @@ -419,7 +418,7 @@ pub struct HttpProperties { ///

A complex type that contains information about an instance that AWS Cloud Map creates when you submit a RegisterInstance request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Instance { ///

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see "DNSName" in the topic AliasTarget.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for a CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record when you created the service.
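A hedged sketch of supplying this attribute map through `RegisterInstanceRequest`; the IDs are placeholders, and the field names are assumptions based on the generated struct, since the hunks here elide them:

```rust
use std::collections::HashMap;

use rusoto_servicediscovery::{RegisterInstanceRequest, ServiceDiscovery, ServiceDiscoveryClient};

fn register_instance(client: &ServiceDiscoveryClient) {
    // An A-record target plus the port used for SRV records and health checks.
    let mut attributes = HashMap::new();
    attributes.insert("AWS_INSTANCE_IPV4".to_string(), "192.0.2.44".to_string());
    attributes.insert("AWS_INSTANCE_PORT".to_string(), "8080".to_string());

    let request = RegisterInstanceRequest {
        service_id: "srv-example".to_string(), // placeholder
        instance_id: "web-1".to_string(),      // placeholder
        attributes,
        ..Default::default()
    };
    // RusotoFuture is a futures-0.1 future; sync() blocks until it resolves.
    match client.register_instance(request).sync() {
        Ok(response) => println!("operation id: {:?}", response.operation_id),
        Err(err) => eprintln!("RegisterInstance failed: {:?}", err),
    }
}
```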

#[serde(rename = "Attributes")] @@ -436,7 +435,7 @@ pub struct Instance { ///

A complex type that contains information about the instances that you registered by using a specified service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceSummary { ///

A string map that contains the following information:

  • The attributes that are associated with the instance.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

  • AWS_ALIAS_DNS_NAME: For an alias record that routes traffic to an Elastic Load Balancing load balancer, the DNS name that is associated with the load balancer.

  • AWS_INSTANCE_CNAME: For a CNAME record, the domain name that Route 53 returns in response to DNS queries, for example, example.com.

  • AWS_INSTANCE_IPV4: For an A record, the IPv4 address that Route 53 returns in response to DNS queries, for example, 192.0.2.44.

  • AWS_INSTANCE_IPV6: For an AAAA record, the IPv6 address that Route 53 returns in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

  • AWS_INSTANCE_PORT: For an SRV record, the value that Route 53 returns for the port. In addition, if the service includes HealthCheckConfig, the port on the endpoint that Route 53 sends requests to.

#[serde(rename = "Attributes")] @@ -464,7 +463,7 @@ pub struct ListInstancesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInstancesResponse { ///

Summary information about the instances that are associated with the specified service.

#[serde(rename = "Instances")] @@ -493,7 +492,7 @@ pub struct ListNamespacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListNamespacesResponse { ///

An array that contains one NamespaceSummary object for each namespace that matches the specified filter criteria.

#[serde(rename = "Namespaces")] @@ -522,7 +521,7 @@ pub struct ListOperationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOperationsResponse { ///

If the response contains NextToken, submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

AWS Cloud Map gets MaxResults operations and then filters them based on the specified criteria. It's possible that no operations in the first MaxResults operations matched the specified criteria but that subsequent groups of MaxResults operations do contain operations that match the criteria.
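A hedged pagination sketch for the NextToken contract described above; the same shape applies to ListServices, ListNamespaces, and ListInstances:

```rust
use rusoto_servicediscovery::{
    ListOperationsRequest, OperationSummary, ServiceDiscovery, ServiceDiscoveryClient,
};

fn list_all_operations(client: &ServiceDiscoveryClient) -> Vec<OperationSummary> {
    let mut all = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let request = ListOperationsRequest {
            next_token: next_token.take(),
            ..Default::default()
        };
        let page = client
            .list_operations(request)
            .sync()
            .expect("ListOperations failed");
        all.extend(page.operations.unwrap_or_default());
        // A page can match nothing yet still carry a NextToken, so loop on the
        // token rather than on whether this page had results.
        match page.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
    all
}
```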

#[serde(rename = "NextToken")] @@ -551,7 +550,7 @@ pub struct ListServicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListServicesResponse { ///

If the response contains NextToken, submit another ListServices request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

AWS Cloud Map gets MaxResults services and then filters them based on the specified criteria. It's possible that no services in the first MaxResults services matched the specified criteria but that subsequent groups of MaxResults services do contain services that match the criteria.

#[serde(rename = "NextToken")] @@ -565,7 +564,7 @@ pub struct ListServicesResponse { ///

A complex type that contains information about a specified namespace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Namespace { ///

The Amazon Resource Name (ARN) that AWS Cloud Map assigns to the namespace when you create it.

#[serde(rename = "Arn")] @@ -622,7 +621,7 @@ pub struct NamespaceFilter { ///

A complex type that contains information that is specific to the namespace type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NamespaceProperties { ///

A complex type that contains the ID for the Route 53 hosted zone that AWS Cloud Map creates when you create a namespace.

#[serde(rename = "DnsProperties")] @@ -636,7 +635,7 @@ pub struct NamespaceProperties { ///

A complex type that contains information about a namespace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NamespaceSummary { ///

The Amazon Resource Name (ARN) that AWS Cloud Map assigns to the namespace when you create it.

#[serde(rename = "Arn")] @@ -673,7 +672,7 @@ pub struct NamespaceSummary { ///

A complex type that contains information about a specified operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Operation { ///

The date and time that the request was submitted, in Unix date/time format and Coordinated Universal Time (UTC). The value of CreateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

#[serde(rename = "CreateDate")] @@ -726,7 +725,7 @@ pub struct OperationFilter { ///

A complex type that contains information about an operation that matches the criteria that you specified in a ListOperations request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OperationSummary { ///

The ID for an operation.

#[serde(rename = "Id")] @@ -756,7 +755,7 @@ pub struct RegisterInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterInstanceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.
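A hedged sketch of the polling flow this points to: feed the returned OperationId into GetOperation until the operation settles. The client can come from `ServiceDiscoveryClient::new`, or from the `new_with_client` constructor these hunks add; the SUCCESS/FAIL status strings are assumptions taken from the Cloud Map documentation rather than from this diff:

```rust
use std::{thread, time::Duration};

use rusoto_servicediscovery::{GetOperationRequest, ServiceDiscovery, ServiceDiscoveryClient};

fn wait_for_operation(client: &ServiceDiscoveryClient, operation_id: String) -> Result<(), String> {
    loop {
        let request = GetOperationRequest {
            operation_id: operation_id.clone(),
        };
        let response = client
            .get_operation(request)
            .sync()
            .map_err(|err| format!("{:?}", err))?;
        let status = response.operation.and_then(|op| op.status).unwrap_or_default();
        match status.as_str() {
            "SUCCESS" => return Ok(()),
            "FAIL" => return Err("operation failed".to_string()),
            // SUBMITTED / PENDING (or a missing status): back off and retry.
            _ => thread::sleep(Duration::from_secs(2)),
        }
    }
}
```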

#[serde(rename = "OperationId")] @@ -766,7 +765,7 @@ pub struct RegisterInstanceResponse { ///

A complex type that contains information about the specified service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Service { ///

The Amazon Resource Name (ARN) that AWS Cloud Map assigns to the service when you create it.

#[serde(rename = "Arn")] @@ -846,7 +845,7 @@ pub struct ServiceFilter { ///

A complex type that contains information about a specified service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceSummary { ///

The Amazon Resource Name (ARN) that AWS Cloud Map assigns to the service when you create it.

#[serde(rename = "Arn")] @@ -907,7 +906,7 @@ pub struct UpdateServiceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServiceResponse { ///

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

#[serde(rename = "OperationId")] @@ -1989,10 +1988,7 @@ impl ServiceDiscoveryClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ServiceDiscoveryClient { - ServiceDiscoveryClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2006,10 +2002,14 @@ impl ServiceDiscoveryClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ServiceDiscoveryClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ServiceDiscoveryClient { + ServiceDiscoveryClient { client, region } } } diff --git a/rusoto/services/ses/Cargo.toml b/rusoto/services/ses/Cargo.toml index 908d26db207..59c9df3252c 100644 --- a/rusoto/services/ses/Cargo.toml +++ b/rusoto/services/ses/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ses" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ses/README.md b/rusoto/services/ses/README.md index b92acf8a739..31ce8356940 100644 --- a/rusoto/services/ses/README.md +++ b/rusoto/services/ses/README.md @@ -23,9 +23,16 @@ To use `rusoto_ses` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_ses = "0.40.0" +rusoto_ses = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ses/src/custom/mod.rs b/rusoto/services/ses/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ses/src/custom/mod.rs +++ b/rusoto/services/ses/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ses/src/generated.rs b/rusoto/services/ses/src/generated.rs index a1ed2a0cf81..f344d1df0e3 100644 --- a/rusoto/services/ses/src/generated.rs +++ b/rusoto/services/ses/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -1992,11 +1991,11 @@ impl DescribeReceiptRuleSetResponseDeserializer { ///

Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.
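A minimal construction sketch for this type; the addresses are placeholders, and per the note above their local parts must stay 7-bit ASCII:

```rust
use rusoto_ses::Destination;

fn build_destination() -> Destination {
    Destination {
        to_addresses: Some(vec!["recipient@example.com".to_string()]),
        cc_addresses: Some(vec!["cc@example.com".to_string()]),
        bcc_addresses: None, // no blind copies in this sketch
    }
}
```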

#[derive(Default, Debug, Clone, PartialEq)] pub struct Destination { - ///

The BCC: field(s) of the message.

+ ///

The recipients to place on the BCC: line of the message.

pub bcc_addresses: Option<Vec<String>>, - ///

The CC: field(s) of the message.

+ ///

The recipients to place on the CC: line of the message.

pub cc_addresses: Option<Vec<String>>, - ///

The To: field(s) of the message.

+ ///

The recipients to place on the To: line of the message.

pub to_addresses: Option<Vec<String>>, } @@ -3334,7 +3333,7 @@ impl ListConfigurationSetsResponseDeserializer { ) } } -///

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

+///

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

#[derive(Default, Debug, Clone, PartialEq)] pub struct ListCustomVerificationEmailTemplatesRequest { ///

The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation will return up to 50 results.

@@ -5953,7 +5952,7 @@ impl SetReceiptRulePositionResponseDeserializer { ///

When included in a receipt rule, this action terminates the evaluation of the receipt rule set and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

For information about setting a stop action in a receipt rule, see the Amazon SES Developer Guide.
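A minimal sketch of this type as redocumented below: Scope only accepts RuleSet, and the SNS topic is optional (the ARN here is the placeholder from the field's own docs):

```rust
use rusoto_ses::StopAction;

fn build_stop_action() -> StopAction {
    StopAction {
        scope: "RuleSet".to_string(), // the only acceptable value
        topic_arn: Some("arn:aws:sns:us-west-2:123456789012:MyTopic".to_string()),
    }
}
```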

#[derive(Default, Debug, Clone, PartialEq)] pub struct StopAction { - ///

The name of the RuleSet that is being stopped.

+ ///

The scope of the StopAction. The only acceptable value is RuleSet.

pub scope: String, ///

The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

pub topic_arn: Option<String>, @@ -10856,7 +10855,7 @@ pub trait Ses { input: SendEmailRequest, ) -> RusotoFuture<SendEmailResponse, SendEmailError>; - ///

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

  • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an "Email address not verified" error.

  • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

  • The maximum message size, including attachments, is 10 MB.

  • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

  • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

  • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

  • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Do not include these X-headers in the DKIM signature; Amazon SES will remove them before sending the email.

For most common sending authorization scenarios, we recommend that you specify the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn parameters. If you only specify the SourceIdentityArn parameter, Amazon SES will set the From and Return Path addresses to the identity specified in SourceIdentityArn. For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

  • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

+ ///

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

  • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an "Email address not verified" error.

  • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

  • The maximum message size, including attachments, is 10 MB.

  • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

  • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

  • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

  • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Don't include these X-headers in the DKIM signature. Amazon SES removes these before it sends the email.

    If you only specify the SourceIdentityArn parameter, Amazon SES sets the From and Return-Path addresses to the same identity that you specified.

For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

  • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.
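A hedged sketch of a SendRawEmail call that respects the requirements above, using a hand-built 7-bit ASCII MIME message; it assumes the generated RawMessage::data field is a bytes::Bytes blob that rusoto base64-encodes on the wire:

```rust
use rusoto_ses::{RawMessage, SendRawEmailRequest, Ses, SesClient};

fn send_raw(client: &SesClient) {
    // A single-part text/plain message; both addresses must be verified while
    // the account is in the SES sandbox.
    let mime = "From: sender@example.com\r\n\
                To: recipient@example.com\r\n\
                Subject: Raw email test\r\n\
                MIME-Version: 1.0\r\n\
                Content-Type: text/plain\r\n\
                \r\n\
                Hello from SendRawEmail.\r\n";

    let request = SendRawEmailRequest {
        raw_message: RawMessage {
            data: mime.as_bytes().to_vec().into(), // assumed bytes::Bytes blob
        },
        ..Default::default()
    };
    match client.send_raw_email(request).sync() {
        Ok(response) => println!("message id: {}", response.message_id),
        Err(err) => eprintln!("SendRawEmail failed: {:?}", err),
    }
}
```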

fn send_raw_email( &self, input: SendRawEmailRequest, @@ -10874,7 +10873,7 @@ pub trait Ses { input: SetActiveReceiptRuleSetRequest, ) -> RusotoFuture<SetActiveReceiptRuleSetResponse, SetActiveReceiptRuleSetError>; - ///

Enables or disables Easy DKIM signing of email sent from an identity:

  • If Easy DKIM signing is enabled for a domain name identity (such as example.com), then Amazon SES will DKIM-sign all email sent by addresses under that domain name (for example, user@example.com).

  • If Easy DKIM signing is enabled for an email address, then Amazon SES will DKIM-sign all email sent by that email address.

For email addresses (for example, user@example.com), you can only enable Easy DKIM signing if the corresponding domain (in this case, example.com) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim operation.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

+ ///

Enables or disables Easy DKIM signing of email sent from an identity. If Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign all email that it sends from addresses on that domain. If Easy DKIM signing is enabled for an email address, then Amazon SES uses DKIM to sign all email it sends from that address.

For email addresses (for example, user@example.com), you can only enable DKIM signing if the corresponding domain (in this case, example.com) has been set up to use Easy DKIM.

You can enable DKIM signing for an identity at any time after you start the verification process for the identity, even if the verification process isn't complete.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.
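A minimal sketch for the operation this documents, enabling Easy DKIM for a (placeholder) domain identity:

```rust
use rusoto_ses::{Ses, SesClient, SetIdentityDkimEnabledRequest};

fn enable_easy_dkim(client: &SesClient) {
    let request = SetIdentityDkimEnabledRequest {
        identity: "example.com".to_string(), // a verified domain identity
        dkim_enabled: true,
    };
    client
        .set_identity_dkim_enabled(request)
        .sync()
        .expect("SetIdentityDkimEnabled failed");
}
```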

fn set_identity_dkim_enabled( &self, input: SetIdentityDkimEnabledRequest, @@ -11012,10 +11011,7 @@ impl SesClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SesClient { - SesClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -11029,10 +11025,14 @@ impl SesClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SesClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SesClient { + SesClient { client, region } } } @@ -11068,7 +11068,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11116,7 +11116,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11169,7 +11169,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11223,7 +11223,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11301,7 +11301,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11352,7 +11352,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11402,7 +11402,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11453,7 +11453,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11501,7 +11501,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11554,7 +11554,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11608,7 +11608,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11687,7 +11687,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11737,7 +11737,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11787,7 +11787,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11838,7 +11838,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11888,7 +11888,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -11939,7 +11939,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12012,7 +12012,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12060,7 +12060,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12110,7 +12110,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12158,7 +12158,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12205,7 +12205,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12258,7 +12258,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12306,7 +12306,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12359,7 +12359,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12412,7 +12412,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12462,7 +12462,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12515,7 +12515,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12563,7 +12563,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12613,7 +12613,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12664,7 +12664,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12712,7 +12712,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12765,7 +12765,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12816,7 +12816,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12866,7 +12866,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12917,7 +12917,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -12967,7 +12967,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13018,7 +13018,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13065,7 +13065,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13118,7 +13118,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13169,7 +13169,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13217,7 +13217,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + 
ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13268,7 +13268,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13316,7 +13316,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13364,7 +13364,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13415,7 +13415,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13432,7 +13432,7 @@ impl Ses for SesClient { }) } - ///

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

  • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an "Email address not verified" error.

  • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

  • The maximum message size, including attachments, is 10 MB.

  • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

  • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

  • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

  • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Do not include these X-headers in the DKIM signature; Amazon SES will remove them before sending the email.

For most common sending authorization scenarios, we recommend that you specify the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn parameters. If you only specify the SourceIdentityArn parameter, Amazon SES will set the From and Return Path addresses to the identity specified in SourceIdentityArn. For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

  • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

+ ///

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

  • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an "Email address not verified" error.

  • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

  • The maximum message size, including attachments, is 10 MB.

  • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

  • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

  • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

  • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Don't include these X-headers in the DKIM signature. Amazon SES removes these before it sends the email.

    If you only specify the SourceIdentityArn parameter, Amazon SES sets the From and Return-Path addresses to the same identity that you specified.

For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

  • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

fn send_raw_email( &self, input: SendRawEmailRequest, @@ -13464,7 +13464,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13515,7 +13515,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13563,7 +13563,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13582,7 +13582,7 @@ impl Ses for SesClient { }) } - ///

Enables or disables Easy DKIM signing of email sent from an identity:

  • If Easy DKIM signing is enabled for a domain name identity (such as example.com), then Amazon SES will DKIM-sign all email sent by addresses under that domain name (for example, user@example.com).

  • If Easy DKIM signing is enabled for an email address, then Amazon SES will DKIM-sign all email sent by that email address.

For email addresses (for example, user@example.com), you can only enable Easy DKIM signing if the corresponding domain (in this case, example.com) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim operation.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

+ ///

Enables or disables Easy DKIM signing of email sent from an identity. If Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign all email that it sends from addresses on that domain. If Easy DKIM signing is enabled for an email address, then Amazon SES uses DKIM to sign all email it sends from that address.

For email addresses (for example, user@example.com), you can only enable DKIM signing if the corresponding domain (in this case, example.com) has been set up to use Easy DKIM.

You can enable DKIM signing for an identity at any time after you start the verification process for the identity, even if the verification process isn't complete.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

fn set_identity_dkim_enabled( &self, input: SetIdentityDkimEnabledRequest, @@ -13611,7 +13611,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13664,7 +13664,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13719,7 +13719,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13768,7 +13768,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13816,7 +13816,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13864,7 +13864,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13915,7 +13915,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -13993,7 +13993,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14107,7 +14107,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14186,7 +14186,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14237,7 +14237,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14288,7 +14288,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14338,7 +14338,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -14416,7 +14416,7 @@ impl Ses for SesClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/shield/Cargo.toml b/rusoto/services/shield/Cargo.toml index a1e558f95d8..ef77626e9f9 100644 --- a/rusoto/services/shield/Cargo.toml +++ b/rusoto/services/shield/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_shield" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/shield/README.md b/rusoto/services/shield/README.md index ab6a3ea8bce..5dea40cadd4 100644 --- a/rusoto/services/shield/README.md +++ b/rusoto/services/shield/README.md @@ -23,9 +23,16 @@ To use `rusoto_shield` in your application, add it as a dependency in your `Carg ```toml [dependencies] -rusoto_shield = "0.40.0" +rusoto_shield = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/shield/src/custom/mod.rs b/rusoto/services/shield/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/shield/src/custom/mod.rs +++ b/rusoto/services/shield/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/shield/src/generated.rs b/rusoto/services/shield/src/generated.rs index bcfccec998b..5e42e5f5238 100644 --- a/rusoto/services/shield/src/generated.rs +++ b/rusoto/services/shield/src/generated.rs @@ -9,30 +9,29 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct AssociateDRTLogBucketRequest { - ///

The Amazon S3 bucket that contains your flow logs.
+ /// The Amazon S3 bucket that contains your AWS WAF logs.
#[serde(rename = "LogBucket")] pub log_bucket: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDRTLogBucketResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -43,12 +42,12 @@ pub struct AssociateDRTRoleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDRTRoleResponse {} ///

The details of a DDoS attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttackDetail { ///

List of counters that describe the attack for the specified time period.
#[serde(rename = "AttackCounters")] @@ -86,13 +85,13 @@ pub struct AttackDetail { ///

Details of the described attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttackProperty { - ///

The type of DDoS event that was observed. NETWORK indicates layer 3 and layer 4 events and APPLICATION indicates layer 7 events.
+ /// The type of distributed denial of service (DDoS) event that was observed. NETWORK indicates layer 3 and layer 4 events and APPLICATION indicates layer 7 events.
#[serde(rename = "AttackLayer")] #[serde(skip_serializing_if = "Option::is_none")] pub attack_layer: Option, - ///

Defines the DDoS attack property information that is provided.
+ /// Defines the DDoS attack property information that is provided. The WORDPRESS_PINGBACK_REFLECTOR and WORDPRESS_PINGBACK_SOURCE values are valid only for WordPress reflective pingback DDoS attacks.
#[serde(rename = "AttackPropertyIdentifier")] #[serde(skip_serializing_if = "Option::is_none")] pub attack_property_identifier: Option, @@ -112,7 +111,7 @@ pub struct AttackProperty { ///

Summarizes all DDoS attacks for a specified time period.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttackSummary { ///

The unique identifier (ID) of the attack.
#[serde(rename = "AttackId")] @@ -138,16 +137,16 @@ pub struct AttackSummary { ///

Describes the attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttackVectorDescription { - ///

The attack type. Valid values:
• UDPTRAFFIC
• UDPFRAGMENT
• GENERICUDPREFLECTION
• DNSREFLECTION
• NTPREFLECTION
• CHARGENREFLECTION
• SSDPREFLECTION
• PORTMAPPER
• RIPREFLECTION
• SNMPREFLECTION
• MSSQLREFLECTION
• NETBIOSREFLECTION
• SYNFLOOD
• ACKFLOOD
• REQUEST_FLOOD
+ /// The attack type. Valid values:
• UDPTRAFFIC
• UDPFRAGMENT
• GENERICUDPREFLECTION
• DNSREFLECTION
• NTPREFLECTION
• CHARGENREFLECTION
• SSDPREFLECTION
• PORTMAPPER
• RIPREFLECTION
• SNMPREFLECTION
• MSSQLREFLECTION
• NETBIOSREFLECTION
• SYNFLOOD
• ACKFLOOD
• REQUESTFLOOD
• HTTPREFLECTION
• UDSREFLECTION
• MEMCACHEDREFLECTION
#[serde(rename = "VectorType")] pub vector_type: String, } ///

A contributor to the attack and their contribution.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Contributor { ///

The name of the contributor. This is dependent on the AttackPropertyIdentifier. For example, if the AttackPropertyIdentifier is SOURCE_COUNTRY, the Name could be United States.
#[serde(rename = "Name")] @@ -170,7 +169,7 @@ pub struct CreateProtectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateProtectionResponse { ///

The unique identifier (ID) for the Protection object that is created.
#[serde(rename = "ProtectionId")] @@ -182,7 +181,7 @@ pub struct CreateProtectionResponse { pub struct CreateSubscriptionRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSubscriptionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -193,14 +192,14 @@ pub struct DeleteProtectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteProtectionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteSubscriptionRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSubscriptionResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -211,7 +210,7 @@ pub struct DescribeAttackRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAttackResponse { ///

The attack that is described.
#[serde(rename = "Attack")] @@ -223,7 +222,7 @@ pub struct DescribeAttackResponse { pub struct DescribeDRTAccessRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDRTAccessResponse { ///

The list of Amazon S3 buckets accessed by the DRT.
#[serde(rename = "LogBucketList")] @@ -239,7 +238,7 @@ pub struct DescribeDRTAccessResponse { pub struct DescribeEmergencyContactSettingsRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEmergencyContactSettingsResponse { ///

A list of email addresses that the DRT can use to contact you during a suspected attack.
#[serde(rename = "EmergencyContactList")] @@ -260,7 +259,7 @@ pub struct DescribeProtectionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeProtectionResponse { ///

The Protection object that is described.
#[serde(rename = "Protection")] @@ -272,7 +271,7 @@ pub struct DescribeProtectionResponse { pub struct DescribeSubscriptionRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSubscriptionResponse { ///

The AWS Shield Advanced subscription details for an account.
#[serde(rename = "Subscription")] @@ -282,20 +281,20 @@ pub struct DescribeSubscriptionResponse { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DisassociateDRTLogBucketRequest { - ///

The Amazon S3 bucket that contains your flow logs.
+ /// The Amazon S3 bucket that contains your AWS WAF logs.
#[serde(rename = "LogBucket")] pub log_bucket: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDRTLogBucketResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DisassociateDRTRoleRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDRTRoleResponse {} ///

Contact information that the DRT can use to contact you during a suspected attack.
@@ -310,7 +309,7 @@ pub struct EmergencyContact { pub struct GetSubscriptionStateRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSubscriptionStateResponse { ///

The status of the subscription.
#[serde(rename = "SubscriptionState")] @@ -319,7 +318,7 @@ pub struct GetSubscriptionStateResponse { ///

Specifies how many protections of a given type you can create.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Limit { ///

The maximum number of protections that can be created for the specified Type.
#[serde(rename = "Max")] @@ -356,7 +355,7 @@ pub struct ListAttacksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAttacksResponse { ///

The attack information for the specified time range.
#[serde(rename = "AttackSummaries")] @@ -381,7 +380,7 @@ pub struct ListProtectionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListProtectionsResponse { ///

If you specify a value for MaxResults and you have more Protections than the value of MaxResults, AWS Shield Advanced returns a NextToken value in the response that allows you to list another group of Protections. For the second and subsequent ListProtections requests, specify the value of NextToken from the previous response to get information about another batch of Protections.
AWS WAF might return the list of Protection objects in batches smaller than the number specified by MaxResults. If there are more Protection objects to return, AWS WAF will always also return a NextToken.
#[serde(rename = "NextToken")] @@ -395,7 +394,7 @@ pub struct ListProtectionsResponse { ///

The mitigation applied to a DDoS attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Mitigation { ///

The name of the mitigation taken for this attack.
#[serde(rename = "MitigationName")] @@ -405,7 +404,7 @@ pub struct Mitigation { ///

An object that represents a resource that is under DDoS protection.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Protection { ///

The unique identifier (ID) of the protection.
#[serde(rename = "Id")] @@ -423,7 +422,7 @@ pub struct Protection { ///

The attack information for the specified SubResource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubResourceSummary { ///

The list of attack types and associated counters.
#[serde(rename = "AttackVectors")] @@ -445,7 +444,7 @@ pub struct SubResourceSummary { ///

Information about the AWS Shield Advanced subscription for an account.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Subscription { ///

If ENABLED, the subscription will be automatically renewed at the end of the existing subscription period.
When you initially create a subscription, AutoRenew is set to ENABLED. You can change this by submitting an UpdateSubscription request. If the UpdateSubscription request does not include a value for AutoRenew, the existing value for AutoRenew remains unchanged.
#[serde(rename = "AutoRenew")] @@ -471,7 +470,7 @@ pub struct Subscription { ///

A summary of information about the attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SummarizedAttackVector { ///

The list of counters that describe the details of the attack.
#[serde(rename = "VectorCounters")] @@ -484,7 +483,7 @@ pub struct SummarizedAttackVector { ///

The counter that describes a DDoS attack.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SummarizedCounter { ///

The average value of the counter for a specified time period.
#[serde(rename = "Average")] @@ -534,7 +533,7 @@ pub struct UpdateEmergencyContactSettingsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateEmergencyContactSettingsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -546,7 +545,7 @@ pub struct UpdateSubscriptionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSubscriptionResponse {} /// Errors returned by AssociateDRTLogBucket @@ -1493,7 +1492,7 @@ impl Error for UpdateSubscriptionError { } /// Trait representing the capabilities of the AWS Shield API. AWS Shield clients implement this trait. pub trait Shield { - ///

Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your flow logs. You can associate up to 10 Amazon S3 buckets with your subscription. To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.
+ /// Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription. To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.
fn associate_drt_log_bucket( &self, input: AssociateDRTLogBucketRequest, @@ -1511,7 +1510,7 @@ pub trait Shield { input: CreateProtectionRequest, ) -> RusotoFuture; - ///

Activates AWS Shield Advanced for an account. As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf. When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.
+ /// Activates AWS Shield Advanced for an account. As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf. To use the services of the DRT, you must be subscribed to the Business Support plan or the Enterprise Support plan. When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.
fn create_subscription( &self, ) -> RusotoFuture; @@ -1554,7 +1553,7 @@ pub trait Shield { &self, ) -> RusotoFuture; - ///

Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your flow logs. To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.
+ /// Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs. To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.
fn disassociate_drt_log_bucket( &self, input: DisassociateDRTLogBucketRequest, @@ -1606,10 +1605,7 @@ impl ShieldClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ShieldClient { - ShieldClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1623,15 +1619,19 @@ impl ShieldClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ShieldClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ShieldClient { + ShieldClient { client, region } } } impl Shield for ShieldClient { - ///

Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your flow logs. You can associate up to 10 Amazon S3 buckets with your subscription. To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.
+ /// Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription. To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.
fn associate_drt_log_bucket( &self, input: AssociateDRTLogBucketRequest, @@ -1717,7 +1717,7 @@ impl Shield for ShieldClient { }) } - ///

Activates AWS Shield Advanced for an account. As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf. When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.
+ /// Activates AWS Shield Advanced for an account. As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf. To use the services of the DRT, you must be subscribed to the Business Support plan or the Enterprise Support plan. When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.
fn create_subscription( &self, ) -> RusotoFuture { @@ -1941,7 +1941,7 @@ impl Shield for ShieldClient { }) } - ///

Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your flow logs. To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.
+ /// Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs. To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.
fn disassociate_drt_log_bucket( &self, input: DisassociateDRTLogBucketRequest, diff --git a/rusoto/services/sms/Cargo.toml b/rusoto/services/sms/Cargo.toml index 098b60211e8..80eec2e074b 100644 --- a/rusoto/services/sms/Cargo.toml +++ b/rusoto/services/sms/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sms" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sms/README.md b/rusoto/services/sms/README.md index 2bd67c5818a..6e761dd461c 100644 --- a/rusoto/services/sms/README.md +++ b/rusoto/services/sms/README.md @@ -23,9 +23,16 @@ To use `rusoto_sms` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_sms = "0.40.0" +rusoto_sms = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/sms/src/custom/mod.rs b/rusoto/services/sms/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/sms/src/custom/mod.rs +++ b/rusoto/services/sms/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/sms/src/generated.rs b/rusoto/services/sms/src/generated.rs index 98a78751981..1edb72108a9 100644 --- a/rusoto/services/sms/src/generated.rs +++ b/rusoto/services/sms/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

Information about the application.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AppSummary { ///

Unique ID of the application.
#[serde(rename = "appId")] @@ -96,7 +95,7 @@ pub struct AppSummary { ///

Represents a connector.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Connector { ///

The time the connector was associated.
#[serde(rename = "associatedOn")] @@ -169,7 +168,7 @@ pub struct CreateAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAppResponse { ///

Summary description of the application.
#[serde(rename = "appSummary")] @@ -228,7 +227,7 @@ pub struct CreateReplicationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateReplicationJobResponse { ///

The unique identifier of the replication job.
#[serde(rename = "replicationJobId")] @@ -245,7 +244,7 @@ pub struct DeleteAppLaunchConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAppLaunchConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -257,7 +256,7 @@ pub struct DeleteAppReplicationConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAppReplicationConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -277,7 +276,7 @@ pub struct DeleteAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAppResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -288,14 +287,14 @@ pub struct DeleteReplicationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteReplicationJobResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteServerCatalogRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteServerCatalogResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -306,7 +305,7 @@ pub struct DisassociateConnectorRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateConnectorResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -322,7 +321,7 @@ pub struct GenerateChangeSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenerateChangeSetResponse { ///

Location of the Amazon S3 object.
#[serde(rename = "s3Location")] @@ -343,7 +342,7 @@ pub struct GenerateTemplateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GenerateTemplateResponse { ///

Location of the Amazon S3 object.
#[serde(rename = "s3Location")] @@ -360,7 +359,7 @@ pub struct GetAppLaunchConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAppLaunchConfigurationResponse { ///

ID of the application associated with the launch configuration.
#[serde(rename = "appId")] @@ -385,7 +384,7 @@ pub struct GetAppReplicationConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAppReplicationConfigurationResponse { ///

Replication configurations associated with server groups in this application.
#[serde(rename = "serverGroupReplicationConfigurations")] @@ -402,7 +401,7 @@ pub struct GetAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAppResponse { ///

Information about the application.
#[serde(rename = "appSummary")] @@ -431,7 +430,7 @@ pub struct GetConnectorsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConnectorsResponse { ///

Information about the registered connectors.
#[serde(rename = "connectorList")] @@ -460,7 +459,7 @@ pub struct GetReplicationJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetReplicationJobsResponse { ///

The token required to retrieve the next set of results. This value is null when there are no more results to return.
#[serde(rename = "nextToken")] @@ -488,7 +487,7 @@ pub struct GetReplicationRunsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetReplicationRunsResponse { ///

The token required to retrieve the next set of results. This value is null when there are no more results to return.
#[serde(rename = "nextToken")] @@ -521,7 +520,7 @@ pub struct GetServersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetServersResponse { ///

The time when the server was last modified.
#[serde(rename = "lastModifiedOn")] @@ -545,7 +544,7 @@ pub struct GetServersResponse { pub struct ImportServerCatalogRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportServerCatalogResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -557,12 +556,12 @@ pub struct LaunchAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LaunchAppResponse {} ///

Details about the latest launch of an application.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LaunchDetails { ///

Latest time this application was launched successfully.
#[serde(rename = "latestLaunchTime")] @@ -595,7 +594,7 @@ pub struct ListAppsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAppsResponse { ///

A list of application summaries.
#[serde(rename = "apps")] @@ -624,7 +623,7 @@ pub struct PutAppLaunchConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutAppLaunchConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -640,12 +639,12 @@ pub struct PutAppReplicationConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutAppReplicationConfigurationResponse {} ///

Represents a replication job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationJob { ///

The description of the replication job.
#[serde(rename = "description")] @@ -723,7 +722,7 @@ pub struct ReplicationJob { ///

Represents a replication run.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationRun { ///

The identifier of the Amazon Machine Image (AMI) from the replication run.
#[serde(rename = "amiId")] @@ -773,7 +772,7 @@ pub struct ReplicationRun { ///

Details of the current stage of a replication run.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ReplicationRunStageDetails { ///

String describing the current stage of a replication run.
#[serde(rename = "stage")] @@ -966,7 +965,7 @@ pub struct StartAppReplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartAppReplicationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -981,7 +980,7 @@ pub struct StartOnDemandReplicationRunRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartOnDemandReplicationRunResponse { ///

The identifier of the replication run.
#[serde(rename = "replicationRunId")] @@ -998,7 +997,7 @@ pub struct StopAppReplicationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopAppReplicationResponse {} ///

A label that can be assigned to an application.
@@ -1023,7 +1022,7 @@ pub struct TerminateAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminateAppResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1055,7 +1054,7 @@ pub struct UpdateAppRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAppResponse { ///

Summary description of the application.
#[serde(rename = "appSummary")] @@ -1111,7 +1110,7 @@ pub struct UpdateReplicationJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateReplicationJobResponse {} ///

A script that runs on first launch of an Amazon EC2 instance. Used for configuring the server during launch.
@@ -3074,10 +3073,7 @@ impl ServerMigrationServiceClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> ServerMigrationServiceClient { - ServerMigrationServiceClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3091,10 +3087,14 @@ impl ServerMigrationServiceClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - ServerMigrationServiceClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> ServerMigrationServiceClient { + ServerMigrationServiceClient { client, region } } } diff --git a/rusoto/services/snowball/Cargo.toml b/rusoto/services/snowball/Cargo.toml index 6c502013586..f684a409b71 100644 --- a/rusoto/services/snowball/Cargo.toml +++ b/rusoto/services/snowball/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_snowball" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/snowball/README.md b/rusoto/services/snowball/README.md index 3feaa29336a..f8f220d114b 100644 --- a/rusoto/services/snowball/README.md +++ b/rusoto/services/snowball/README.md @@ -23,9 +23,16 @@ To use `rusoto_snowball` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_snowball = "0.40.0" +rusoto_snowball = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/snowball/src/custom/mod.rs b/rusoto/services/snowball/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/snowball/src/custom/mod.rs +++ b/rusoto/services/snowball/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/snowball/src/generated.rs b/rusoto/services/snowball/src/generated.rs index 7d6ca7ea2fa..5c0ee924aa3 100644 --- a/rusoto/services/snowball/src/generated.rs +++ b/rusoto/services/snowball/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -93,7 +92,7 @@ pub struct CancelClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelClusterResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -104,12 +103,12 @@ pub struct CancelJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelJobResult {} ///

Contains a cluster's state, a cluster's ID, and other important information.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterListEntry { ///

The 39-character ID for the cluster that you want to list, for example CID123e4567-e89b-12d3-a456-426655440000.
#[serde(rename = "ClusterId")] @@ -131,7 +130,7 @@ pub struct ClusterListEntry { ///

Contains metadata about a specific cluster.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClusterMetadata { ///

The automatically generated ID for a specific address.
#[serde(rename = "AddressId")] @@ -189,7 +188,7 @@ pub struct ClusterMetadata { ///

A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see Using Amazon EC2 Compute Instances in the AWS Snowball Developer Guide.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompatibleImage { ///

The unique identifier for an individual Snowball Edge AMI.
#[serde(rename = "AmiId")] @@ -209,7 +208,7 @@ pub struct CreateAddressRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAddressResult { ///

The automatically generated ID for a specific address. You'll use this ID when you create a job to specify which address you want the Snowball for that job shipped to.
#[serde(rename = "AddressId")] @@ -257,7 +256,7 @@ pub struct CreateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateClusterResult { ///

The automatically generated ID for a cluster.
#[serde(rename = "ClusterId")] @@ -318,7 +317,7 @@ pub struct CreateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateJobResult { ///

The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
#[serde(rename = "JobId")] @@ -328,7 +327,7 @@ pub struct CreateJobResult { ///

Defines the real-time status of a Snowball's data transfer while the device is at AWS. This data is only available while a job has a JobState value of InProgress, for both import and export jobs.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DataTransfer { ///

The number of bytes transferred between a Snowball and Amazon S3.
#[serde(rename = "BytesTransferred")] @@ -356,7 +355,7 @@ pub struct DescribeAddressRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAddressResult { ///

The address that you want the Snowball or Snowballs associated with a specific job to be shipped to.
#[serde(rename = "Address")] @@ -377,7 +376,7 @@ pub struct DescribeAddressesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAddressesResult { ///

The Snowball shipping addresses that were created for this account.
#[serde(rename = "Addresses")] @@ -397,7 +396,7 @@ pub struct DescribeClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClusterResult { ///

Information about a specific cluster, including shipping information, cluster status, and other important metadata.
#[serde(rename = "ClusterMetadata")] @@ -413,7 +412,7 @@ pub struct DescribeJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeJobResult { ///

Information about a specific job, including shipping information, job status, and other important metadata.
#[serde(rename = "JobMetadata")] @@ -454,7 +453,7 @@ pub struct GetJobManifestRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobManifestResult { ///

The Amazon S3 presigned URL for the manifest file associated with the specified JobId value.
#[serde(rename = "ManifestURI")] @@ -470,7 +469,7 @@ pub struct GetJobUnlockCodeRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetJobUnlockCodeResult { ///

The UnlockCode value for the specified job. The UnlockCode value can be accessed for up to 90 days after the job has been created.
#[serde(rename = "UnlockCode")] @@ -482,7 +481,7 @@ pub struct GetJobUnlockCodeResult { pub struct GetSnowballUsageRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSnowballUsageResult { ///

The service limit for number of Snowballs this account can have at once. The default service limit is 1 (one).
#[serde(rename = "SnowballLimit")] @@ -496,7 +495,7 @@ pub struct GetSnowballUsageResult { ///

Each JobListEntry object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobListEntry { ///

The creation date for this job.
#[serde(rename = "CreationDate")] @@ -530,7 +529,7 @@ pub struct JobListEntry { ///

Contains job logs. Whenever Snowball is used to import data into or export data out of Amazon S3, you'll have the option of downloading a PDF job report. Job logs are returned as a part of the response syntax of the DescribeJob action in the JobMetadata data type. The job logs can be accessed for up to 60 minutes after this request has been made. To access any of the job logs after 60 minutes have passed, you'll have to make another call to the DescribeJob action.
For import jobs, the PDF job report becomes available at the end of the import process. For export jobs, your job report typically becomes available while the Snowball for your job part is being delivered to you.
The job report provides you insight into the state of your Amazon S3 data transfer. The report includes details about your job or job part for your records.
For deeper visibility into the status of your transferred objects, you can look at the two associated logs: a success log and a failure log. The logs are saved in comma-separated value (CSV) format, and the name of each log includes the ID of the job or job part that the log describes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobLogs { ///

A link to an Amazon S3 presigned URL where the job completion report is located.
#[serde(rename = "JobCompletionReportURI")] @@ -548,7 +547,7 @@ pub struct JobLogs { ///

Contains information about a specific job including shipping information, job status, and other important metadata. This information is returned as a part of the response syntax of the DescribeJob action.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct JobMetadata { ///

The ID for the address that you want the Snowball shipped to.
#[serde(rename = "AddressId")] @@ -679,7 +678,7 @@ pub struct ListClusterJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClusterJobsResult { ///

Each JobListEntry object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs.
#[serde(rename = "JobListEntries")] @@ -704,7 +703,7 @@ pub struct ListClustersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListClustersResult { ///

Each ClusterListEntry object contains a cluster's state, a cluster's ID, and other important status information.
#[serde(rename = "ClusterListEntries")] @@ -729,7 +728,7 @@ pub struct ListCompatibleImagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCompatibleImagesResult { ///

A JSON-formatted object that describes a compatible AMI.
#[serde(rename = "CompatibleImages")] @@ -754,7 +753,7 @@ pub struct ListJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListJobsResult { ///

Each JobListEntry object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs.
#[serde(rename = "JobListEntries")] @@ -798,7 +797,7 @@ pub struct S3Resource { ///

The Status and TrackingNumber information for an inbound or outbound shipment.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Shipment { ///

Status information for a shipment.
#[serde(rename = "Status")] @@ -812,7 +811,7 @@ pub struct Shipment { ///

A job's shipping information, including inbound and outbound tracking numbers and shipping speed options.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ShippingDetails { ///

The Status and TrackingNumber values for a Snowball being returned to AWS for a particular job.
#[serde(rename = "InboundShipment")] @@ -864,7 +863,7 @@ pub struct UpdateClusterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateClusterResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -907,7 +906,7 @@ pub struct UpdateJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateJobResult {} /// Errors returned by CancelCluster @@ -1767,10 +1766,7 @@ impl SnowballClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SnowballClient { - SnowballClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -1784,10 +1780,14 @@ impl SnowballClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SnowballClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SnowballClient { + SnowballClient { client, region } } } diff --git a/rusoto/services/sns/Cargo.toml b/rusoto/services/sns/Cargo.toml index c676ee97322..fe67a65d015 100644 --- a/rusoto/services/sns/Cargo.toml +++ b/rusoto/services/sns/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sns" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sns/README.md b/rusoto/services/sns/README.md index 138ca0b33ca..ec9f0afcffa 100644 --- a/rusoto/services/sns/README.md +++ b/rusoto/services/sns/README.md @@ -23,9 +23,16 @@ To use `rusoto_sns` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_sns = "0.40.0" +rusoto_sns = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. 
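The SES hunks above and the SNS hunks below repeatedly flip `ParserConfig::new().trim_whitespace(true)` to `trim_whitespace(false)` in the generated XML response parsing. A minimal sketch of what that xml-rs 0.8 setting changes, using only the `EventReader`/`ParserConfig` APIs the generated code itself calls (the sample input and printing here are illustrative, not from the patch):

```rust
// With trim_whitespace(true), leading and trailing whitespace in character
// data is stripped before the deserializer sees it; with false, the text is
// delivered verbatim -- presumably why the generated clients now disable it,
// so string payloads that legitimately contain whitespace survive a round trip.
use xml::{EventReader, ParserConfig};

fn main() {
    let body = "<Message>  padded text  </Message>";
    for &trim in &[true, false] {
        let config = ParserConfig::new().trim_whitespace(trim);
        println!("trim_whitespace({}):", trim);
        for event in EventReader::new_with_config(body.as_bytes(), config) {
            println!("  {:?}", event);
        }
    }
}
```
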
diff --git a/rusoto/services/sns/src/custom/mod.rs b/rusoto/services/sns/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/sns/src/custom/mod.rs +++ b/rusoto/services/sns/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/sns/src/generated.rs b/rusoto/services/sns/src/generated.rs index d6680ebf60e..6562ad93e3f 100644 --- a/rusoto/services/sns/src/generated.rs +++ b/rusoto/services/sns/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -5038,10 +5037,7 @@ impl SnsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SnsClient { - SnsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -5055,10 +5051,14 @@ impl SnsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SnsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SnsClient { + SnsClient { client, region } } } @@ -5117,7 +5117,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5167,7 +5167,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5215,7 +5215,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5263,7 +5263,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5314,7 +5314,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5437,7 +5437,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); 
let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5488,7 +5488,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5539,7 +5539,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5587,7 +5587,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5638,7 +5638,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5691,7 +5691,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5739,7 +5739,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5787,7 +5787,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5838,7 +5838,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5886,7 +5886,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5936,7 +5936,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -5987,7 +5987,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = 
XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6038,7 +6038,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6086,7 +6086,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6214,7 +6214,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6315,7 +6315,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6364,7 +6364,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); @@ -6440,7 +6440,7 @@ impl Sns for SnsClient { } else { let reader = EventReader::new_with_config( response.body.as_ref(), - ParserConfig::new().trim_whitespace(true), + ParserConfig::new().trim_whitespace(false), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); diff --git a/rusoto/services/sqs/Cargo.toml b/rusoto/services/sqs/Cargo.toml index dcfb1157cc1..254087d63c0 100644 --- a/rusoto/services/sqs/Cargo.toml +++ b/rusoto/services/sqs/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_sqs" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -17,18 +17,20 @@ exclude = ["test_resources/*"] [dependencies] bytes = "0.4.12" futures = "0.1.16" -serde_urlencoded = "0.5" -xml-rs = "0.7" +serde_urlencoded = "0.6" +xml-rs = "0.8" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/sqs/README.md b/rusoto/services/sqs/README.md index 44f9581e19e..304b76cb81b 100644 --- a/rusoto/services/sqs/README.md +++ b/rusoto/services/sqs/README.md @@ -23,9 +23,16 @@ To use `rusoto_sqs` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_sqs = "0.40.0" +rusoto_sqs = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. 
+ +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/sqs/src/custom/custom_tests.rs b/rusoto/services/sqs/src/custom/custom_tests.rs index 52f81d5f269..b9a4376c1d0 100644 --- a/rusoto/services/sqs/src/custom/custom_tests.rs +++ b/rusoto/services/sqs/src/custom/custom_tests.rs @@ -1,19 +1,23 @@ extern crate rusoto_mock; +use crate::generated::{ + GetQueueUrlError, GetQueueUrlRequest, MessageAttributeValue, ReceiveMessageRequest, + SendMessageRequest, Sqs, SqsClient, +}; use std::collections::HashMap; -use crate::generated::{Sqs, SqsClient, SendMessageRequest, ReceiveMessageRequest, MessageAttributeValue, GetQueueUrlRequest, GetQueueUrlError}; -use rusoto_core::{Region, RusotoError}; +use self::rusoto_mock::*; +use rusoto_core::param::Params; use rusoto_core::signature::SignedRequest; use rusoto_core::signature::SignedRequestPayload; -use rusoto_core::param::Params; +use rusoto_core::{Region, RusotoError}; use serde_urlencoded; -use self::rusoto_mock::*; #[test] fn should_serialize_map_parameters_in_request_body() { let mock = MockRequestDispatcher::with_status(200) - .with_body(r#" + .with_body( + r#" @@ -31,7 +35,8 @@ fn should_serialize_map_parameters_in_request_body() { 27daac76-34dd-47df-bd01-1f6e873584a0 - "#) + "#, + ) .with_request_checker(|request: &SignedRequest| { println!("{:#?}", request.params); @@ -39,12 +44,18 @@ fn should_serialize_map_parameters_in_request_body() { assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = serde_urlencoded::from_bytes(buffer).unwrap(); - assert_eq!(Some(&Some("test_attribute_name".to_owned())), - params.get("MessageAttribute.1.Name")); - assert_eq!(Some(&Some("test_attribute_value".to_owned())), - params.get("MessageAttribute.1.Value.StringValue")); - assert_eq!(Some(&Some("String".to_owned())), - params.get("MessageAttribute.1.Value.DataType")); + assert_eq!( + Some(&Some("test_attribute_name".to_owned())), + params.get("MessageAttribute.1.Name") + ); + assert_eq!( + Some(&Some("test_attribute_value".to_owned())), + params.get("MessageAttribute.1.Value.StringValue") + ); + assert_eq!( + Some(&Some("String".to_owned())), + params.get("MessageAttribute.1.Value.DataType") + ); } else { panic!("Unexpected request.payload: {:?}", request.payload); } @@ -52,12 +63,14 @@ fn should_serialize_map_parameters_in_request_body() { let mut message_attributes = HashMap::new(); - message_attributes.insert("test_attribute_name".to_owned(), - MessageAttributeValue { - string_value: Some("test_attribute_value".to_owned()), - data_type: "String".to_owned(), - ..Default::default() - }); + message_attributes.insert( + "test_attribute_name".to_owned(), + MessageAttributeValue { + string_value: Some("test_attribute_value".to_owned()), + data_type: "String".to_owned(), + ..Default::default() + }, + ); let request = SendMessageRequest { message_body: "foo".to_owned(), queue_url: "bar".to_owned(), @@ -72,7 +85,8 @@ fn should_serialize_map_parameters_in_request_body() { #[test] fn should_fix_issue_323() { let mock = MockRequestDispatcher::with_status(200) - .with_body(r#" + .with_body( + r#" @@ -99,20 +113,23 @@ fn should_fix_issue_323() { b6633655-283d-45b4-aee4-4e84e0ae6afa - "#) + "#, + ) .with_request_checker(|request: &SignedRequest| { assert_eq!("POST", request.method); assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = 
serde_urlencoded::from_bytes(buffer).unwrap(); - assert_eq!(params.get("Action"), - Some(&Some("ReceiveMessage".to_owned()))); - assert_eq!(params.get("MaxNumberOfMessages"), - Some(&Some("1".to_owned()))); - assert_eq!(params.get("VisibilityTimeout"), - Some(&Some("2".to_owned()))); - assert_eq!(params.get("WaitTimeSeconds"), - Some(&Some("3".to_owned()))); + assert_eq!( + params.get("Action"), + Some(&Some("ReceiveMessage".to_owned())) + ); + assert_eq!( + params.get("MaxNumberOfMessages"), + Some(&Some("1".to_owned())) + ); + assert_eq!(params.get("VisibilityTimeout"), Some(&Some("2".to_owned()))); + assert_eq!(params.get("WaitTimeSeconds"), Some(&Some("3".to_owned()))); assert_eq!(params.get("Integer"), None); } else { panic!("Unexpected request.payload: {:?}", request.payload); @@ -133,8 +150,8 @@ fn should_fix_issue_323() { #[test] fn test_parse_queue_does_not_exist_error() { - let mock = MockRequestDispatcher::with_status(400) - .with_body(r#" + let mock = MockRequestDispatcher::with_status(400).with_body( + r#" Sender @@ -143,7 +160,8 @@ fn test_parse_queue_does_not_exist_error() { 8f8f9957-c0d9-536a-9ca6-ca7483be06ad - "#); + "#, + ); let request = GetQueueUrlRequest { queue_name: "no-such-queue".to_owned(), @@ -154,5 +172,10 @@ fn test_parse_queue_does_not_exist_error() { let result = client.get_queue_url(request).sync(); assert!(result.is_err()); let err = result.err().unwrap(); - assert_eq!(RusotoError::Service(GetQueueUrlError::QueueDoesNotExist("The specified queue does not exist for this wsdl version.".to_owned())), err); + assert_eq!( + RusotoError::Service(GetQueueUrlError::QueueDoesNotExist( + "The specified queue does not exist for this wsdl version.".to_owned() + )), + err + ); } diff --git a/rusoto/services/sqs/src/custom/mod.rs b/rusoto/services/sqs/src/custom/mod.rs index 9a14b939383..e4234693714 100644 --- a/rusoto/services/sqs/src/custom/mod.rs +++ b/rusoto/services/sqs/src/custom/mod.rs @@ -1,2 +1,2 @@ #[cfg(test)] -mod custom_tests; \ No newline at end of file +mod custom_tests; diff --git a/rusoto/services/sqs/src/generated.rs b/rusoto/services/sqs/src/generated.rs index 382871a82ca..6f5c076ad14 100644 --- a/rusoto/services/sqs/src/generated.rs +++ b/rusoto/services/sqs/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; @@ -59,9 +58,9 @@ impl ActionNameListSerializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct AddPermissionRequest {
- /// The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon Simple Queue Service Developer Guide.
+ /// The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon Simple Queue Service Developer Guide.
pub aws_account_ids: Vec<String>,
- /// The action the client wants to allow for the specified principal. Valid values: the name of any action or *. For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon Simple Queue Service Developer Guide. Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.
+ /// The action the client wants to allow for the specified principal. Valid values: the name of any action or *. For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon Simple Queue Service Developer Guide. Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.
pub actions: Vec<String>,
/// The unique identification of the permission you're setting (for example, AliceSendMessage). Maximum 80 characters. Allowed characters include alphanumeric characters, hyphens (-), and underscores (_).
pub label: String, @@ -437,10 +436,12 @@ impl ChangeMessageVisibilityRequestSerializer { ///

#[derive(Default, Debug, Clone, PartialEq)] pub struct CreateQueueRequest {
- /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses: • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0. • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide. • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide. • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. The following attributes apply only to server-side-encryption: • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the AWS Key Management Service API Reference. • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?. The following attributes apply only to FIFO (first-in-first-out) queues: • FifoQueue - Designates a queue as FIFO. Valid values: true, false. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide. • ContentBasedDeduplication - Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide. • Every message must have a unique MessageDeduplicationId, • You may provide a MessageDeduplicationId explicitly. • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.
+ /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses: • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0. • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide. • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide. • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. The following attributes apply only to server-side-encryption: • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the AWS Key Management Service API Reference. • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?. The following attributes apply only to FIFO (first-in-first-out) queues: • FifoQueue - Designates a queue as FIFO. Valid values: true, false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide. • ContentBasedDeduplication - Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide. • Every message must have a unique MessageDeduplicationId, • You may provide a MessageDeduplicationId explicitly. • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.
pub attributes: Option<::std::collections::HashMap<String, String>>,
/// The name of the new queue. The following limits apply to this name: • A queue name can have up to 80 characters. • Valid values: alphanumeric characters, hyphens (-), and underscores (_). • A FIFO queue name must end with the .fifo suffix. Queue URLs and names are case-sensitive.
pub queue_name: String,
+ /// Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide. When you use queue tags, keep the following guidelines in mind: • Adding more than 50 tags to a queue isn't recommended. • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. • Tags are case-sensitive. • A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide. To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
+ pub tags: Option<::std::collections::HashMap<String, String>>,
} /// Serialize `CreateQueueRequest` contents to a `SignedRequest`. @@ -460,6 +461,9 @@ impl CreateQueueRequestSerializer { ); } params.put(&format!("{}{}", prefix, "QueueName"), &obj.queue_name); + if let Some(ref field_value) = obj.tags { + TagMapSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); + } } } @@ -702,7 +706,7 @@ impl DeleteQueueRequestSerializer { ///
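A minimal sketch (not part of the diff) of the new `tags` field on `CreateQueueRequest`; the queue name and tag values here are hypothetical:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_sqs::{CreateQueueRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("team".to_owned(), "billing".to_owned());
    let request = CreateQueueRequest {
        queue_name: "example-queue".to_owned(),
        // Serialized under the Tag.N prefix by the new TagMapSerializer call above.
        tags: Some(tags),
        ..Default::default()
    };
    match client.create_queue(request).sync() {
        Ok(result) => println!("created: {:?}", result.queue_url),
        Err(e) => eprintln!("create_queue failed: {}", e),
    }
}
```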

#[derive(Default, Debug, Clone, PartialEq)] pub struct GetQueueAttributesRequest {
- /// A list of attributes for which to retrieve information. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. The following attributes are supported: • All - Returns all values. • ApproximateNumberOfMessages - Returns the approximate number of messages available for retrieval from the queue. • ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. • ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. • CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time). • DelaySeconds - Returns the default delay on the queue in seconds. • LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time). • MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it. • MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message. • Policy - Returns the policy of the queue. • QueueArn - Returns the Amazon resource name (ARN) of the queue. • ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive. • RedrivePolicy - Returns the string that includes the parameters for dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide. • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. • VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. The following attributes apply only to server-side-encryption: • KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. • KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?. The following attributes apply only to FIFO (first-in-first-out) queues: • FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix. • ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.
+ /// A list of attributes for which to retrieve information. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. The following attributes are supported: • All - Returns all values. • ApproximateNumberOfMessages - Returns the approximate number of messages available for retrieval from the queue. • ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. • ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. • CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time). • DelaySeconds - Returns the default delay on the queue in seconds. • LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time). • MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it. • MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message. • Policy - Returns the policy of the queue. • QueueArn - Returns the Amazon resource name (ARN) of the queue. • ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive. • RedrivePolicy - Returns the string that includes the parameters for dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide. • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. • VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. The following attributes apply only to server-side-encryption: • KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. • KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?. The following attributes apply only to FIFO (first-in-first-out) queues: • FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix. • ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.
pub attribute_names: Option<Vec<String>>,
/// The URL of the Amazon SQS queue whose attribute information is retrieved. Queue URLs and names are case-sensitive.
pub queue_url: String, @@ -788,7 +792,7 @@ impl GetQueueUrlRequestSerializer { } }
-/// For more information, see Interpreting Responses in the Amazon Simple Queue Service Developer Guide.
+/// For more information, see Interpreting Responses in the Amazon Simple Queue Service Developer Guide.
#[derive(Default, Debug, Clone, PartialEq)] pub struct GetQueueUrlResult {
/// The URL of the queue.

@@ -966,7 +970,7 @@ pub struct Message { pub md5_of_body: Option<String>,
/// An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.
pub md5_of_message_attributes: Option<String>,
- /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
+ /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
pub message_attributes: Option<::std::collections::HashMap<String, MessageAttributeValue>>,
/// A unique identifier for the message. A MessageId is considered unique across all AWS accounts for an extended period of time.
pub message_id: Option<String>, @@ -1040,7 +1044,7 @@ pub struct MessageAttributeValue { pub binary_list_values: Option<Vec<bytes::Bytes>>,
/// Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.
pub binary_value: Option<bytes::Bytes>,
- /// Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue. You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
+ /// Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue. You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
pub data_type: String,
/// Not implemented. Reserved for future use.
pub string_list_values: Option<Vec<String>>, @@ -1160,6 +1164,26 @@ impl MessageBodyAttributeMapSerializer { } } +/// Serialize `MessageBodySystemAttributeMap` contents to a `SignedRequest`. +struct MessageBodySystemAttributeMapSerializer; +impl MessageBodySystemAttributeMapSerializer { + fn serialize( + params: &mut Params, + name: &str, + obj: &::std::collections::HashMap<String, MessageSystemAttributeValue>, + ) { + for (index, (key, value)) in obj.iter().enumerate() { + let prefix = format!("{}.{}", name, index + 1); + params.put(&format!("{}.{}", prefix, "Name"), &key); + MessageSystemAttributeValueSerializer::serialize( + params, + &format!("{}.{}", prefix, "Value"), + value, + ); + } + } +} + struct MessageListDeserializer; impl MessageListDeserializer { #[allow(unused_variables)] @@ -1218,6 +1242,57 @@ impl MessageSystemAttributeNameDeserializer { Ok(obj) } }
+/// The user-specified message system attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see SendMessage. Name, type, value and the message body must not be empty or null.
+#[derive(Default, Debug, Clone, PartialEq)] +pub struct MessageSystemAttributeValue {
+ /// Not implemented. Reserved for future use.
+ pub binary_list_values: Option<Vec<bytes::Bytes>>,
+ /// Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.
+ pub binary_value: Option<bytes::Bytes>,
+ /// Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue. You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
+ pub data_type: String,
+ /// Not implemented. Reserved for future use.
+ pub string_list_values: Option<Vec<String>>,
+ /// Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters.
+ pub string_value: Option<String>,
+} + +/// Serialize `MessageSystemAttributeValue` contents to a `SignedRequest`. +struct MessageSystemAttributeValueSerializer; +impl MessageSystemAttributeValueSerializer { + fn serialize(params: &mut Params, name: &str, obj: &MessageSystemAttributeValue) { + let mut prefix = name.to_string(); + if prefix != "" { + prefix.push_str("."); + } + + if let Some(ref field_value) = obj.binary_list_values { + BinaryListSerializer::serialize( + params, + &format!("{}{}", prefix, "BinaryListValue"), + field_value, + ); + } + if let Some(ref field_value) = obj.binary_value { + params.put( + &format!("{}{}", prefix, "BinaryValue"), + ::std::str::from_utf8(&field_value).unwrap(), + ); + } + params.put(&format!("{}{}", prefix, "DataType"), &obj.data_type); + if let Some(ref field_value) = obj.string_list_values { + StringListSerializer::serialize( + params, + &format!("{}{}", prefix, "StringListValue"), + field_value, + ); + } + if let Some(ref field_value) = obj.string_value { + params.put(&format!("{}{}", prefix, "StringValue"), &field_value); + } + } +}
///
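A minimal sketch (not part of the diff) of the message system attribute plumbing added above; the queue URL and trace header value are hypothetical:

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_sqs::{
    MessageSystemAttributeValue, SendMessageBatchRequest, SendMessageBatchRequestEntry, Sqs,
    SqsClient,
};

fn main() {
    let mut system_attributes = HashMap::new();
    // Per the docs in this diff, AWSTraceHeader is currently the only supported
    // message system attribute; its type must be String and its value a
    // correctly formatted AWS X-Ray trace header.
    system_attributes.insert(
        "AWSTraceHeader".to_owned(),
        MessageSystemAttributeValue {
            data_type: "String".to_owned(),
            string_value: Some("Root=1-5759e988-bd862e3fe1be46a994272793".to_owned()),
            ..Default::default()
        },
    );
    let request = SendMessageBatchRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue".to_owned(),
        entries: vec![SendMessageBatchRequestEntry {
            id: "msg-1".to_owned(),
            message_body: "hello".to_owned(),
            // Serialized as MessageSystemAttribute.1.Name / MessageSystemAttribute.1.Value.*
            // by the new MessageBodySystemAttributeMapSerializer.
            message_system_attributes: Some(system_attributes),
            ..Default::default()
        }],
    };
    let client = SqsClient::new(Region::UsEast1);
    let _ = client.send_message_batch(request).sync();
}
```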

#[derive(Default, Debug, Clone, PartialEq)] pub struct PurgeQueueRequest { @@ -1316,7 +1391,7 @@ impl QueueUrlListDeserializer { ///
#[derive(Default, Debug, Clone, PartialEq)] pub struct ReceiveMessageRequest {
- /// A list of s that need to be returned along with each message. These attributes include: • All - Returns all values. • ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds). • ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted. • SenderId • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. • SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds). • MessageDeduplicationId - Returns the value provided by the producer that calls the SendMessage action. • MessageGroupId - Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. • SequenceNumber - Returns the value provided by Amazon SQS.
+ /// A list of attributes that need to be returned along with each message. These attributes include: • All - Returns all values. • ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds). • ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted. • AWSTraceHeader - Returns the AWS X-Ray trace header string. • SenderId • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. • SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds). • MessageDeduplicationId - Returns the value provided by the producer that calls the SendMessage action. • MessageGroupId - Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. • SequenceNumber - Returns the value provided by Amazon SQS.
pub attribute_names: Option<Vec<String>>,
/// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10. Default: 1.

pub max_number_of_messages: Option<i64>, @@ -1324,7 +1399,7 @@ pub struct ReceiveMessageRequest { pub message_attribute_names: Option<Vec<String>>,
/// The URL of the Amazon SQS queue from which messages are received. Queue URLs and names are case-sensitive.
pub queue_url: String,
- /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId. • You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes). • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary. • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible. • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order. The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.
+ /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId. • You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes). • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary. • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible. • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order. The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.
pub receive_request_attempt_id: Option<String>,
/// The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.

pub visibility_timeout: Option<i64>, @@ -1461,14 +1536,17 @@ pub struct SendMessageBatchRequestEntry { pub delay_seconds: Option<i64>,
/// An identifier for a message in this batch used to communicate the result. The Ids of a batch request need to be unique within a request. This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).
pub id: String,
- /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
+ /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.
pub message_attributes: Option<::std::collections::HashMap<String, MessageAttributeValue>>,
/// The body of the message.
pub message_body: String,
- /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide. • Every message must have a unique MessageDeduplicationId, • You may provide a MessageDeduplicationId explicitly. • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages. Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted. The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.
+ /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide. • Every message must have a unique MessageDeduplicationId, • You may provide a MessageDeduplicationId explicitly. • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages. Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted. The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.
pub message_deduplication_id: Option<String>,
- /// This parameter applies only to FIFO (first-in-first-out) queues. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion. • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId. The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[]^_`{|}~). For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide. MessageGroupId is required for FIFO queues. You can't use it for Standard queues.
+ /// This parameter applies only to FIFO (first-in-first-out) queues. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion. • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId. The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[]^_`{|}~). For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide. MessageGroupId is required for FIFO queues. You can't use it for Standard queues.
pub message_group_id: Option<String>,
+ /// The message system attribute to send. Each message system attribute consists of a Name, Type, and Value. • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace string. • The size of a message system attribute doesn't count towards the total size of a message.

+ pub message_system_attributes: + Option<::std::collections::HashMap<String, MessageSystemAttributeValue>>, } /// Serialize `SendMessageBatchRequestEntry` contents to a `SignedRequest`. @@ -1501,6 +1579,13 @@ impl SendMessageBatchRequestEntrySerializer { if let Some(ref field_value) = obj.message_group_id { params.put(&format!("{}{}", prefix, "MessageGroupId"), &field_value); } + if let Some(ref field_value) = obj.message_system_attributes { + MessageBodySystemAttributeMapSerializer::serialize( + params, + &format!("{}{}", prefix, "MessageSystemAttribute"), + field_value, + ); + } } } @@ -1563,6 +1648,8 @@ pub struct SendMessageBatchResultEntry { pub md5_of_message_attributes: Option<String>, ///
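As a usage illustration for the new field and serializer above, here is a minimal, hedged sketch of building a batch entry that carries the AWSTraceHeader system attribute. It assumes the generated `MessageSystemAttributeValue` mirrors `MessageAttributeValue` (a required `data_type`, an optional `string_value`, a derived `Default`); the IDs and the trace value are placeholders, not real data.

```rust
use std::collections::HashMap;

use rusoto_sqs::{MessageSystemAttributeValue, SendMessageBatchRequestEntry};

// Sketch: a FIFO batch entry carrying an X-Ray trace header as a
// message system attribute (field shapes assumed, see lead-in).
fn traced_entry() -> SendMessageBatchRequestEntry {
    let mut system_attributes = HashMap::new();
    system_attributes.insert(
        "AWSTraceHeader".to_string(),
        MessageSystemAttributeValue {
            data_type: "String".to_string(),
            // Placeholder trace string in the X-Ray Root=... format.
            string_value: Some("Root=1-5759e988-bd862e3fe1be46a994272793".to_string()),
            ..Default::default()
        },
    );
    SendMessageBatchRequestEntry {
        id: "entry-1".to_string(),
        message_body: "hello".to_string(),
        message_deduplication_id: Some("dedup-1".to_string()),
        message_group_id: Some("group-a".to_string()),
        message_system_attributes: Some(system_attributes),
        ..Default::default()
    }
}
```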

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

pub md5_of_message_body: String, + ///

An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

+ pub md5_of_message_system_attributes: Option<String>, ///

An identifier for the message.

pub message_id: String, ///

This parameter applies only to FIFO (first-in-first-out) queues.

The large, non-consecutive number that Amazon SQS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

@@ -1594,6 +1681,11 @@ impl SendMessageBatchResultEntryDeserializer { obj.md5_of_message_body = StringDeserializer::deserialize("MD5OfMessageBody", stack)?; } + "MD5OfMessageSystemAttributes" => { + obj.md5_of_message_system_attributes = Some( + StringDeserializer::deserialize("MD5OfMessageSystemAttributes", stack)?, + ); + } "MessageId" => { obj.message_id = StringDeserializer::deserialize("MessageId", stack)?; } @@ -1642,14 +1734,17 @@ impl SendMessageBatchResultEntryListDeserializer { pub struct SendMessageRequest { ///

The length of time, in seconds, for which to delay a specific message. Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue applies.

When you set FifoQueue, you can't set DelaySeconds per message. You can set this parameter only on a queue level.

pub delay_seconds: Option<i64>, - ///

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

+ ///

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

pub message_attributes: Option<::std::collections::HashMap<String, MessageAttributeValue>>, ///

The message to send. The maximum string size is 256 KB.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

pub message_body: String, - ///

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

+ ///

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

pub message_deduplication_id: Option<String>, - ///

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

+ ///

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

pub message_group_id: Option<String>, + ///

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace string.

  • The size of a message system attribute doesn't count towards the total size of a message.

+ pub message_system_attributes: + Option<::std::collections::HashMap<String, MessageSystemAttributeValue>>, ///

The URL of the Amazon SQS queue to which a message is sent.

Queue URLs and names are case-sensitive.

pub queue_url: String, } @@ -1683,6 +1778,13 @@ impl SendMessageRequestSerializer { if let Some(ref field_value) = obj.message_group_id { params.put(&format!("{}{}", prefix, "MessageGroupId"), &field_value); } + if let Some(ref field_value) = obj.message_system_attributes { + MessageBodySystemAttributeMapSerializer::serialize( + params, + &format!("{}{}", prefix, "MessageSystemAttribute"), + field_value, + ); + } params.put(&format!("{}{}", prefix, "QueueUrl"), &obj.queue_url); } } @@ -1694,7 +1796,9 @@ pub struct SendMessageResult { pub md5_of_message_attributes: Option<String>, ///
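A minimal sketch of a SendMessage call against a FIFO queue using the request struct above. It assumes the client API shown elsewhere in this file (`SqsClient::new`, `.sync()` on the returned `RusotoFuture`); the queue URL and IDs are placeholders.

```rust
use rusoto_core::Region;
use rusoto_sqs::{SendMessageRequest, Sqs, SqsClient};

// Sketch: send one message to a FIFO queue with explicit group and
// deduplication IDs (placeholder values; see lead-in for assumptions).
fn send_fifo_message(queue_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    let result = client
        .send_message(SendMessageRequest {
            queue_url: queue_url.to_string(),
            message_body: "order-created".to_string(),
            message_group_id: Some("orders".to_string()),
            // Optional when the queue has ContentBasedDeduplication enabled.
            message_deduplication_id: Some("order-42".to_string()),
            ..Default::default()
        })
        .sync()?;
    println!("sent message id: {:?}", result.message_id);
    Ok(())
}
```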

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

pub md5_of_message_body: Option<String>, - ///

An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

+ ///

An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest.

+ pub md5_of_message_system_attributes: Option<String>, + ///

An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

pub message_id: Option<String>, ///

This parameter applies only to FIFO (first-in-first-out) queues.

The large, non-consecutive number that Amazon SQS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

pub sequence_number: Option<String>, @@ -1719,6 +1823,12 @@ impl SendMessageResultDeserializer { obj.md5_of_message_body = Some(StringDeserializer::deserialize("MD5OfMessageBody", stack)?); } + "MD5OfMessageSystemAttributes" => { + obj.md5_of_message_system_attributes = Some(StringDeserializer::deserialize( + "MD5OfMessageSystemAttributes", + stack, + )?); + } "MessageId" => { obj.message_id = Some(StringDeserializer::deserialize("MessageId", stack)?); } @@ -1735,7 +1845,7 @@ impl SendMessageResultDeserializer { ///
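Since SendMessageResult returns MD5 digests of what the service received, a caller can cross-check them locally. Below is a hedged sketch using the third-party `md5` crate (an assumption; it is not part of Rusoto). Only the body digest is shown: the attribute and system-attribute digests use a custom encoding that is more involved than a plain hash of the payload.

```rust
// Sketch: compare the MD5OfMessageBody from a SendMessageResult with a
// locally computed digest of the body that was sent (uses the `md5` crate).
fn body_digest_matches(sent_body: &str, md5_of_message_body: &str) -> bool {
    let local = format!("{:x}", md5::compute(sent_body.as_bytes()));
    local == md5_of_message_body
}
```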

#[derive(Default, Debug, Clone, PartialEq)] pub struct SetQueueAttributesRequest { - ///

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

+ ///

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

pub attributes: ::std::collections::HashMap<String, String>, ///

The URL of the Amazon SQS queue whose attributes are set.

Queue URLs and names are case-sensitive.

pub queue_url: String, @@ -2944,10 +3054,10 @@ impl Error for UntagQueueError { } /// Trait representing the capabilities of the Amazon SQS API. Amazon SQS clients implement this trait. pub trait Sqs { - ///
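To ground the attribute table above, here is a hedged sketch of a SetQueueAttributes call that attaches a dead-letter queue and raises the visibility timeout. The DLQ ARN and queue URL are placeholders; note that RedrivePolicy is passed as a JSON string, not as separate parameters.

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_sqs::{SetQueueAttributesRequest, Sqs, SqsClient};

// Sketch: configure a redrive policy and visibility timeout
// (placeholder ARN/URL; see lead-in for assumptions).
fn configure_queue(queue_url: &str, dlq_arn: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    let mut attributes = HashMap::new();
    // RedrivePolicy embeds deadLetterTargetArn and maxReceiveCount as JSON.
    attributes.insert(
        "RedrivePolicy".to_string(),
        format!(
            r#"{{"deadLetterTargetArn":"{}","maxReceiveCount":"5"}}"#,
            dlq_arn
        ),
    );
    attributes.insert("VisibilityTimeout".to_string(), "60".to_string());
    client
        .set_queue_attributes(SetQueueAttributesRequest {
            attributes,
            queue_url: queue_url.to_string(),
        })
        .sync()?;
    Ok(())
}
```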

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

AddPermission writes an Amazon-SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

An Amazon SQS policy can have a maximum of 7 actions.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

  • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

  • An Amazon SQS policy can have a maximum of 7 actions.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn add_permission(&self, input: AddPermissionRequest) -> RusotoFuture<(), AddPermissionError>; - ///
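A minimal sketch of the AddPermission call documented above, granting another account SendMessage access. The account ID and label are placeholders; the request fields follow the generated AddPermissionRequest shape.

```rust
use rusoto_core::Region;
use rusoto_sqs::{AddPermissionRequest, Sqs, SqsClient};

// Sketch: let a (placeholder) second account send to this queue.
fn share_queue(queue_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    client
        .add_permission(AddPermissionRequest {
            actions: vec!["SendMessage".to_string()],
            aws_account_ids: vec!["111122223333".to_string()],
            label: "SharedSendAccess".to_string(),
            queue_url: queue_url.to_string(),
        })
        .sync()?;
    Ok(())
}
```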

Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.

A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.

For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.

For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

+ ///

Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to the maximum allowed time. If you try to extend the visibility timeout beyond the maximum, your request is rejected.

An Amazon SQS message has three basic states:

  1. Sent to a queue by a producer.

  2. Received from the queue by a consumer.

  3. Deleted from the queue.

A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of inflight messages.

Limits that apply to inflight messages are unrelated to the unlimited number of stored messages.

For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

For FIFO queues, there can be a maximum of 20,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

fn change_message_visibility( &self, input: ChangeMessageVisibilityRequest, @@ -2959,7 +3069,7 @@ pub trait Sqs { input: ChangeMessageVisibilityBatchRequest, ) -> RusotoFuture<ChangeMessageVisibilityBatchResult, ChangeMessageVisibilityBatchError>; - ///
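A hedged sketch of extending an in-flight message's visibility timeout with the action described above. The receipt handle would come from a prior ReceiveMessage call, and the 600-second value is arbitrary.

```rust
use rusoto_core::Region;
use rusoto_sqs::{ChangeMessageVisibilityRequest, Sqs, SqsClient};

// Sketch: give a consumer 10 more minutes before the message
// becomes visible again (12 hours is the overall cap).
fn extend_visibility(
    queue_url: &str,
    receipt_handle: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    client
        .change_message_visibility(ChangeMessageVisibilityRequest {
            queue_url: queue_url.to_string(),
            receipt_handle: receipt_handle.to_string(),
            visibility_timeout: 600,
        })
        .sync()?;
    Ok(())
}
```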

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn create_queue( &self, input: CreateQueueRequest, @@ -2974,34 +3084,34 @@ pub trait Sqs { input: DeleteMessageBatchRequest, ) -> RusotoFuture<DeleteMessageBatchResult, DeleteMessageBatchError>; - ///
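A hedged sketch of creating a FIFO queue per the caveats above: the FifoQueue attribute must be set at creation time (it cannot be added later), and the queue name needs the .fifo suffix. The queue name is a placeholder.

```rust
use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_sqs::{CreateQueueRequest, Sqs, SqsClient};

// Sketch: create a content-deduplicating FIFO queue (placeholder name).
fn create_fifo_queue() -> Result<Option<String>, Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    let mut attributes = HashMap::new();
    attributes.insert("FifoQueue".to_string(), "true".to_string());
    attributes.insert("ContentBasedDeduplication".to_string(), "true".to_string());
    let result = client
        .create_queue(CreateQueueRequest {
            queue_name: "my-queue.fifo".to_string(),
            attributes: Some(attributes),
            ..Default::default()
        })
        .sync()?;
    Ok(result.queue_url)
}
```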

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn delete_queue(&self, input: DeleteQueueRequest) -> RusotoFuture<(), DeleteQueueError>; - ///

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

+ ///

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

fn get_queue_attributes( &self, input: GetQueueAttributesRequest, ) -> RusotoFuture<GetQueueAttributesResult, GetQueueAttributesError>; - ///
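A small sketch of the GetQueueAttributes call, fetching a single attribute. The attribute name comes from the queue-attribute table earlier in this file; the URL is a placeholder.

```rust
use rusoto_core::Region;
use rusoto_sqs::{GetQueueAttributesRequest, Sqs, SqsClient};

// Sketch: read the approximate backlog size of a queue.
fn queue_depth(queue_url: &str) -> Result<Option<String>, Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    let result = client
        .get_queue_attributes(GetQueueAttributesRequest {
            queue_url: queue_url.to_string(),
            attribute_names: Some(vec!["ApproximateNumberOfMessages".to_string()]),
        })
        .sync()?;
    Ok(result
        .attributes
        .and_then(|mut attrs| attrs.remove("ApproximateNumberOfMessages")))
}
```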

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

fn get_queue_url( &self, input: GetQueueUrlRequest, ) -> RusotoFuture<GetQueueUrlResult, GetQueueUrlError>; - ///

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

fn list_dead_letter_source_queues( &self, input: ListDeadLetterSourceQueuesRequest, ) -> RusotoFuture<ListDeadLetterSourceQueuesResult, ListDeadLetterSourceQueuesError>; - ///

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn list_queue_tags( &self, input: ListQueueTagsRequest, ) -> RusotoFuture<ListQueueTagsResult, ListQueueTagsError>; - ///

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn list_queues( &self, input: ListQueuesRequest, @@ -3010,13 +3120,13 @@ pub trait Sqs { ///

Deletes the messages in a queue specified by the QueueURL parameter.

When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.

The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.

Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.

Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.

fn purge_queue(&self, input: PurgeQueueRequest) -> RusotoFuture<(), PurgeQueueError>; - ///

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon Simple Queue Service Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

+ ///

Retrieves one or more messages (up to 10) from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon Simple Queue Service Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

fn receive_message( &self, input: ReceiveMessageRequest, ) -> RusotoFuture<ReceiveMessageResult, ReceiveMessageError>; - ///
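A hedged sketch of the receive-then-delete loop the documentation above implies: long-poll for up to 20 seconds, process each message, and delete it via its receipt handle before the visibility timeout lapses. Field shapes follow the generated request and response structs; the queue URL is a placeholder.

```rust
use rusoto_core::Region;
use rusoto_sqs::{DeleteMessageRequest, ReceiveMessageRequest, Sqs, SqsClient};

// Sketch: one long-polling receive pass over a queue.
fn drain_once(queue_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SqsClient::new(Region::UsEast1);
    let result = client
        .receive_message(ReceiveMessageRequest {
            queue_url: queue_url.to_string(),
            max_number_of_messages: Some(10),
            wait_time_seconds: Some(20), // enables long polling
            ..Default::default()
        })
        .sync()?;
    for message in result.messages.unwrap_or_default() {
        // A real consumer would process message.body here.
        if let Some(receipt_handle) = message.receipt_handle {
            client
                .delete_message(DeleteMessageRequest {
                    queue_url: queue_url.to_string(),
                    receipt_handle,
                })
                .sync()?;
        }
    }
    Ok(())
}
```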

Revokes any permissions in the queue policy that matches the specified Label parameter.

Only the owner of a queue can remove permissions from it.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Revokes any permissions in the queue policy that match the specified Label parameter.

  • Only the owner of a queue can remove permissions from it.

  • Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

fn remove_permission( &self, input: RemovePermissionRequest, @@ -3034,16 +3144,16 @@ input: SendMessageBatchRequest, ) -> RusotoFuture<SendMessageBatchResult, SendMessageBatchError>; - ///

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

  • In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

  • Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

fn set_queue_attributes( &self, input: SetQueueAttributesRequest, ) -> RusotoFuture<(), SetQueueAttributesError>; - ///

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn tag_queue(&self, input: TagQueueRequest) -> RusotoFuture<(), TagQueueError>; - ///

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn untag_queue(&self, input: UntagQueueRequest) -> RusotoFuture<(), UntagQueueError>; } /// A client for the Amazon SQS API. @@ -3058,10 +3168,7 @@ impl SqsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> SqsClient { - SqsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3075,15 +3182,19 @@ impl SqsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - SqsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> SqsClient { + SqsClient { client, region } } } impl Sqs for SqsClient { - ///
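The constructor refactor above funnels both `new` and `new_with` through the added `new_with_client`, which also lets callers share one `Client` across several service clients. Below is a hedged sketch of all three entry points; the `EnvironmentProvider`/`HttpClient` pairing is one plausible choice of provider and dispatcher, not the only one.

```rust
use rusoto_core::request::HttpClient;
use rusoto_core::{Client, Region};
use rusoto_credential::EnvironmentProvider;
use rusoto_sqs::SqsClient;

// Sketch: the three ways to construct an SqsClient after this change.
fn build_clients() -> Result<(), Box<dyn std::error::Error>> {
    // Default credentials provider and TLS client.
    let _default = SqsClient::new(Region::UsEast1);

    // Explicit provider and dispatcher; internally routed
    // through new_with_client as well.
    let _custom = SqsClient::new_with(
        EnvironmentProvider::default(),
        HttpClient::new()?,
        Region::UsEast1,
    );

    // Reuse one shared Client across several service clients.
    let _shared = SqsClient::new_with_client(Client::shared(), Region::UsEast1);
    Ok(())
}
```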

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

AddPermission writes an Amazon-SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

An Amazon SQS policy can have a maximum of 7 actions.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

  • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

  • An Amazon SQS policy can have a maximum of 7 actions.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

fn add_permission(&self, input: AddPermissionRequest) -> RusotoFuture<(), AddPermissionError> { let mut request = SignedRequest::new("POST", "sqs", &self.region, "/"); let mut params = Params::new(); @@ -3108,7 +3219,7 @@ impl Sqs for SqsClient { }) } - ///

Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.

A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.

For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.

For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

+ ///

Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to the maximum allowed time. If you try to extend the visibility timeout beyond the maximum, your request is rejected.

An Amazon SQS message has three basic states:

  1. Sent to a queue by a producer.

  2. Received from the queue by a consumer.

  3. Deleted from the queue.

A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of inflight messages.

Limits that apply to inflight messages are unrelated to the unlimited number of stored messages.

For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

For FIFO queues, there can be a maximum of 20,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.
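
A short sketch of extending a received message's visibility timeout (field names assumed from the generated request struct; the URL and receipt handle are placeholders supplied by a prior ReceiveMessage call):

```rust
use rusoto_core::Region;
use rusoto_sqs::{ChangeMessageVisibilityRequest, Sqs, SqsClient};

fn extend_visibility(queue_url: &str, receipt_handle: &str) {
    let client = SqsClient::new(Region::UsEast1);
    let request = ChangeMessageVisibilityRequest {
        queue_url: queue_url.to_string(),
        receipt_handle: receipt_handle.to_string(),
        visibility_timeout: 600, // 10 minutes, well under the 12-hour maximum
    };
    client.change_message_visibility(request).sync().expect("ChangeMessageVisibility failed");
}
```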

fn change_message_visibility(
    &self,
    input: ChangeMessageVisibilityRequest,
@@ -3162,7 +3273,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3181,7 +3292,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
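
A minimal sketch of creating a standard queue with one attribute (field names and the `.sync()` helper assumed from the generated client; the queue name and attribute value are placeholders):

```rust
use rusoto_core::Region;
use rusoto_sqs::{CreateQueueRequest, Sqs, SqsClient};
use std::collections::HashMap;

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let mut attributes = HashMap::new();
    // Omitting the FifoQueue attribute creates a standard queue.
    attributes.insert("VisibilityTimeout".to_string(), "60".to_string());
    let request = CreateQueueRequest {
        queue_name: "my-queue".to_string(),
        attributes: Some(attributes),
        ..Default::default()
    };
    let result = client.create_queue(request).sync().expect("CreateQueue failed");
    println!("queue url: {:?}", result.queue_url);
}
```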

fn create_queue(
    &self,
    input: CreateQueueRequest,
@@ -3213,7 +3324,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3289,7 +3400,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3308,7 +3419,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
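
A brief sketch (the queue URL is a placeholder; fields assumed from the generated API):

```rust
use rusoto_core::Region;
use rusoto_sqs::{DeleteQueueRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = DeleteQueueRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
    };
    // Succeeds even if the queue does not exist; remember the 60-second recreation wait.
    client.delete_queue(request).sync().expect("DeleteQueue failed");
}
```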

fn delete_queue(&self, input: DeleteQueueRequest) -> RusotoFuture<(), DeleteQueueError> {
    let mut request = SignedRequest::new("POST", "sqs", &self.region, "/");
    let mut params = Params::new();
@@ -3333,7 +3444,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

+ ///

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second
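
A sketch of fetching selected attributes (field names assumed from the generated request struct; the attribute name is one documented SQS attribute, and the URL is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_sqs::{GetQueueAttributesRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = GetQueueAttributesRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
        attribute_names: Some(vec!["ApproximateNumberOfMessages".to_string()]),
    };
    let result = client.get_queue_attributes(request).sync().expect("GetQueueAttributes failed");
    println!("{:?}", result.attributes);
}
```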

fn get_queue_attributes(
    &self,
    input: GetQueueAttributesRequest,
@@ -3365,7 +3476,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3384,7 +3495,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.
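
A sketch of resolving a queue name to its URL (fields assumed from the generated request struct; names are placeholders):

```rust
use rusoto_core::Region;
use rusoto_sqs::{GetQueueUrlRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = GetQueueUrlRequest {
        queue_name: "my-queue".to_string(),
        // Set this to the owner's account ID when looking up a shared queue.
        queue_owner_aws_account_id: None,
    };
    let result = client.get_queue_url(request).sync().expect("GetQueueUrl failed");
    println!("{:?}", result.queue_url);
}
```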

fn get_queue_url(
    &self,
    input: GetQueueUrlRequest,
@@ -3416,7 +3527,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3435,7 +3546,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.
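
A sketch of listing the queues that use a given dead-letter queue (fields assumed; the URL is a placeholder for the dead-letter queue itself):

```rust
use rusoto_core::Region;
use rusoto_sqs::{ListDeadLetterSourceQueuesRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = ListDeadLetterSourceQueuesRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-dlq".to_string(),
    };
    let result = client
        .list_dead_letter_source_queues(request)
        .sync()
        .expect("ListDeadLetterSourceQueues failed");
    println!("{:?}", result.queue_urls);
}
```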

fn list_dead_letter_source_queues(
    &self,
    input: ListDeadLetterSourceQueuesRequest,
@@ -3464,7 +3575,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3483,7 +3594,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
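
A sketch of reading a queue's cost allocation tags (fields assumed; the URL is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_sqs::{ListQueueTagsRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = ListQueueTagsRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
    };
    let result = client.list_queue_tags(request).sync().expect("ListQueueTags failed");
    println!("{:?}", result.tags);
}
```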

fn list_queue_tags(
    &self,
    input: ListQueueTagsRequest,
@@ -3515,7 +3626,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3534,7 +3645,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
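
A sketch of listing queues by prefix (fields assumed; the prefix is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_sqs::{ListQueuesRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = ListQueuesRequest {
        queue_name_prefix: Some("prod-".to_string()),
    };
    let result = client.list_queues(request).sync().expect("ListQueues failed");
    println!("{:?}", result.queue_urls);
}
```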

fn list_queues(
    &self,
    input: ListQueuesRequest,
@@ -3566,7 +3677,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3608,7 +3719,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon Simple Queue Service Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

+ ///

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon Simple Queue Service Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
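
A long-polling receive-then-delete sketch (the request fields, the `.sync()` helper, and the companion delete_message call are assumptions from the generated trait; the queue URL is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_sqs::{DeleteMessageRequest, ReceiveMessageRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let queue_url = "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string();
    let request = ReceiveMessageRequest {
        queue_url: queue_url.clone(),
        max_number_of_messages: Some(10),
        wait_time_seconds: Some(20), // enables long polling
        ..Default::default()
    };
    let result = client.receive_message(request).sync().expect("ReceiveMessage failed");
    for message in result.messages.unwrap_or_default() {
        println!("body: {:?}", message.body);
        // Delete each processed message using its receipt handle.
        if let Some(receipt_handle) = message.receipt_handle {
            let delete = DeleteMessageRequest { queue_url: queue_url.clone(), receipt_handle };
            client.delete_message(delete).sync().expect("DeleteMessage failed");
        }
    }
}
```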

fn receive_message(
    &self,
    input: ReceiveMessageRequest,
@@ -3640,7 +3751,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3659,7 +3770,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Revokes any permissions in the queue policy that match the specified Label parameter.

Only the owner of a queue can remove permissions from it.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Revokes any permissions in the queue policy that match the specified Label parameter.

  • Only the owner of a queue can remove permissions from it.

  • Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.
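
A sketch of revoking a previously granted permission by its label (fields assumed; values are placeholders matching the AddPermission sketch above):

```rust
use rusoto_core::Region;
use rusoto_sqs::{RemovePermissionRequest, Sqs, SqsClient};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = RemovePermissionRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
        // The Label supplied in the earlier AddPermission call.
        label: "SharedSendAccess".to_string(),
    };
    client.remove_permission(request).sync().expect("RemovePermission failed");
}
```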

fn remove_permission(
    &self,
    input: RemovePermissionRequest,
@@ -3719,7 +3830,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3770,7 +3881,7 @@ impl Sqs for SqsClient {
        } else {
            let reader = EventReader::new_with_config(
                response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            let _start_document = stack.next();
@@ -3789,7 +3900,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

  • In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

  • Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.
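
A sketch of updating one attribute (fields assumed; the retention value is a placeholder and, per the note above, may take up to 15 minutes to propagate):

```rust
use rusoto_core::Region;
use rusoto_sqs::{SetQueueAttributesRequest, Sqs, SqsClient};
use std::collections::HashMap;

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let mut attributes = HashMap::new();
    attributes.insert("MessageRetentionPeriod".to_string(), "86400".to_string()); // one day, in seconds
    let request = SetQueueAttributesRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
        attributes,
    };
    client.set_queue_attributes(request).sync().expect("SetQueueAttributes failed");
}
```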

fn set_queue_attributes(
    &self,
    input: SetQueueAttributesRequest,
@@ -3817,7 +3928,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
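
A sketch of adding a cost allocation tag (fields assumed; the tag key and value are placeholders):

```rust
use rusoto_core::Region;
use rusoto_sqs::{Sqs, SqsClient, TagQueueRequest};
use std::collections::HashMap;

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let mut tags = HashMap::new();
    tags.insert("Team".to_string(), "Billing".to_string());
    let request = TagQueueRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
        tags,
    };
    client.tag_queue(request).sync().expect("TagQueue failed");
}
```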

fn tag_queue(&self, input: TagQueueRequest) -> RusotoFuture<(), TagQueueError> {
    let mut request = SignedRequest::new("POST", "sqs", &self.region, "/");
    let mut params = Params::new();
@@ -3842,7 +3953,7 @@ impl Sqs for SqsClient {
        })
    }

-    ///

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

+ ///

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
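
A sketch of removing a tag by key (fields assumed; the key matches the TagQueue sketch above):

```rust
use rusoto_core::Region;
use rusoto_sqs::{Sqs, SqsClient, UntagQueueRequest};

fn main() {
    let client = SqsClient::new(Region::UsEast1);
    let request = UntagQueueRequest {
        queue_url: "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue".to_string(),
        tag_keys: vec!["Team".to_string()],
    };
    client.untag_queue(request).sync().expect("UntagQueue failed");
}
```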

fn untag_queue(&self, input: UntagQueueRequest) -> RusotoFuture<(), UntagQueueError> {
    let mut request = SignedRequest::new("POST", "sqs", &self.region, "/");
    let mut params = Params::new();
diff --git a/rusoto/services/sqs/src/lib.rs b/rusoto/services/sqs/src/lib.rs
index 161fcbed52f..5d97ee2686c 100644
--- a/rusoto/services/sqs/src/lib.rs
+++ b/rusoto/services/sqs/src/lib.rs
@@ -12,7 +12,7 @@
 // =================================================================
 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//!

Welcome to the Amazon Simple Queue Service API Reference.

Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

Standard queues are available in all regions. FIFO queues are available in the US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland) regions.

You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

  • Cryptographically sign your service requests

  • Retry requests

  • Handle error responses

Additional Information

+//!

Welcome to the Amazon Simple Queue Service API Reference.

Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

  • Cryptographically sign your service requests

  • Retry requests

  • Handle error responses

Additional Information

//! //! If you're using the service, you're probably looking for [SqsClient](struct.SqsClient.html) and [Sqs](trait.Sqs.html). diff --git a/rusoto/services/ssm/Cargo.toml b/rusoto/services/ssm/Cargo.toml index 68377f1a341..a274f753b84 100644 --- a/rusoto/services/ssm/Cargo.toml +++ b/rusoto/services/ssm/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_ssm" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/ssm/README.md b/rusoto/services/ssm/README.md index 3366332b7aa..68ade52f650 100644 --- a/rusoto/services/ssm/README.md +++ b/rusoto/services/ssm/README.md @@ -23,9 +23,16 @@ To use `rusoto_ssm` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_ssm = "0.40.0" +rusoto_ssm = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/ssm/src/custom/mod.rs b/rusoto/services/ssm/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/ssm/src/custom/mod.rs +++ b/rusoto/services/ssm/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/ssm/src/generated.rs b/rusoto/services/ssm/src/generated.rs index ba7c6dffe5e..58f2aa4e9cd 100644 --- a/rusoto/services/ssm/src/generated.rs +++ b/rusoto/services/ssm/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

An activation registers one or more on-premises servers or virtual machines (VMs) with AWS so that you can configure those servers or VMs using Run Command. A server or VM that has been registered with AWS is called a managed instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Activation { ///

The ID created by Systems Manager when you submitted the activation.

#[serde(rename = "ActivationId")] @@ -84,12 +83,12 @@ pub struct AddTagsToResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsToResourceResult {} ///

Describes an association of a Systems Manager document and an instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Association { ///

The ID created by the system when you create an association. An association is a binding between a document and a set of targets with a schedule.

#[serde(rename = "AssociationId")] @@ -135,7 +134,7 @@ pub struct Association { ///

Describes the parameters for a document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociationDescription { ///

The association ID.

#[serde(rename = "AssociationId")] @@ -221,7 +220,7 @@ pub struct AssociationDescription { ///

Includes information about the specified association.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociationExecution { ///

The association ID.

#[serde(rename = "AssociationId")] @@ -273,7 +272,7 @@ pub struct AssociationExecutionFilter { ///

Includes information about the specified association execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociationExecutionTarget { ///

The association ID.

#[serde(rename = "AssociationId")] @@ -337,7 +336,7 @@ pub struct AssociationFilter { ///

Information about the association.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociationOverview { ///

Returns the number of targets for the association status. For example, if you created an association with two instances, and one of them was successful, this would return the count of instances by status.

#[serde(rename = "AssociationStatusAggregatedCount")] @@ -373,7 +372,7 @@ pub struct AssociationStatus { ///

Information about the association version.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociationVersionInfo { ///

The ID created by the system when the association was created.

#[serde(rename = "AssociationId")] @@ -431,7 +430,7 @@ pub struct AssociationVersionInfo { ///

A structure that includes attributes that describe a document attachment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachmentContent { ///

The cryptographic hash value of the document content.

#[serde(rename = "Hash")] @@ -457,7 +456,7 @@ pub struct AttachmentContent { ///

An attribute of an attachment, such as the attachment name.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachmentInformation { ///

The name of the attachment.

#[serde(rename = "Name")] @@ -480,7 +479,7 @@ pub struct AttachmentsSource { ///

Detailed information about the current state of an individual Automation execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutomationExecution { ///

The execution ID.

#[serde(rename = "AutomationExecutionId")] @@ -597,7 +596,7 @@ pub struct AutomationExecutionFilter { ///

Details about a specific Automation execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AutomationExecutionMetadata { ///

The execution ID.

#[serde(rename = "AutomationExecutionId")] @@ -703,7 +702,7 @@ pub struct CancelCommandRequest { ///

Whether or not the command was successfully canceled. There is no guarantee that a request can be canceled.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelCommandResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -714,7 +713,7 @@ pub struct CancelMaintenanceWindowExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelMaintenanceWindowExecutionResult { ///

The ID of the maintenance window execution that has been stopped.

#[serde(rename = "WindowExecutionId")] @@ -737,7 +736,7 @@ pub struct CloudWatchOutputConfig { ///

Describes a command request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Command { ///

CloudWatch Logs information where you want Systems Manager to send the command output.

#[serde(rename = "CloudWatchOutputConfig")] @@ -846,7 +845,7 @@ pub struct CommandFilter { ///

An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user runs SendCommand against three instances, then a command invocation is created for each requested instance ID. A command invocation returns status and detail information about a command you ran.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommandInvocation { ///

CloudWatch Logs information where you want Systems Manager to send the command output.

#[serde(rename = "CloudWatchOutputConfig")] @@ -915,7 +914,7 @@ pub struct CommandInvocation { ///

Describes plugin details.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommandPlugin { ///

The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent.

#[serde(rename = "Name")] @@ -985,7 +984,7 @@ pub struct ComplianceExecutionSummary { ///

Information about the compliance as defined by the resource type. For example, for a patch resource type, Items includes information about the PatchSeverity, Classification, etc.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComplianceItem { ///

The compliance type. For example, Association (for a State Manager association), Patch, or Custom:string are all valid compliance types.

#[serde(rename = "ComplianceType")] @@ -1067,7 +1066,7 @@ pub struct ComplianceStringFilter { ///

A summary of compliance information by compliance type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComplianceSummaryItem { ///

The type of compliance item. For example, the compliance type can be Association, Patch, or Custom:string.

#[serde(rename = "ComplianceType")] @@ -1085,7 +1084,7 @@ pub struct ComplianceSummaryItem { ///

A summary of resources that are compliant. The summary is organized according to the resource count for each compliance type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompliantSummary { ///

The total number of resources that are compliant.

#[serde(rename = "CompliantCount")] @@ -1125,7 +1124,7 @@ pub struct CreateActivationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateActivationResult { ///

The code the system generates when it processes the activation. The activation code functions like a password to validate the activation ID.

#[serde(rename = "ActivationCode")] @@ -1197,7 +1196,7 @@ pub struct CreateAssociationBatchRequestEntry { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAssociationBatchResult { ///

Information about the associations that failed.

#[serde(rename = "Failed")] @@ -1227,7 +1226,7 @@ pub struct CreateAssociationRequest { #[serde(rename = "DocumentVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub document_version: Option, - ///

The instance ID.

+ ///

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. If you use the parameter InstanceId, you cannot use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

#[serde(rename = "InstanceId")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_id: Option, @@ -1254,14 +1253,14 @@ pub struct CreateAssociationRequest { #[serde(rename = "ScheduleExpression")] #[serde(skip_serializing_if = "Option::is_none")] pub schedule_expression: Option, - ///

The targets (either instances or tags) for the association.

+ ///

The targets (either instances or tags) for the association. You must specify a value for Targets if you don't specify a value for InstanceId.

#[serde(rename = "Targets")] #[serde(skip_serializing_if = "Option::is_none")] pub targets: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAssociationResult { ///

Information about the association.

#[serde(rename = "AssociationDescription")] @@ -1304,7 +1303,7 @@ pub struct CreateDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateDocumentResult { ///

Information about the Systems Manager document.

#[serde(rename = "DocumentDescription")] @@ -1356,7 +1355,7 @@ pub struct CreateMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateMaintenanceWindowResult { ///

The ID of the created maintenance window.

#[serde(rename = "WindowId")] @@ -1373,7 +1372,7 @@ pub struct CreateOpsItemRequest { #[serde(rename = "Notifications")] #[serde(skip_serializing_if = "Option::is_none")] pub notifications: Option>, - ///

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

This custom data is searchable, but with restrictions. For the Searchable operational data feature, all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. For the Private operational data feature, the data is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

+ ///

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.

#[serde(rename = "OperationalData")] #[serde(skip_serializing_if = "Option::is_none")] pub operational_data: Option<::std::collections::HashMap>, @@ -1388,7 +1387,7 @@ pub struct CreateOpsItemRequest { ///

The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager.

#[serde(rename = "Source")] pub source: String, - ///

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an OpsItem to identify the AWS resource or the type of issue. In this case, you could specify the following key name/value pairs:

  • Key=source,Value=EC2-instance

  • Key=status,Value=stopped

To add tags to an existing OpsItem, use the AddTagsToResource action.

+ ///

Optional metadata that you assign to a resource. You can restrict access to OpsItems by using an inline IAM policy that specifies tags. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Tags use a key-value pair. For example:

Key=Department,Value=Finance

To add tags to an existing OpsItem, use the AddTagsToResource action.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -1398,7 +1397,7 @@ pub struct CreateOpsItemRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateOpsItemResponse { ///

The ID of the OpsItem.

#[serde(rename = "OpsItemId")] @@ -1462,7 +1461,7 @@ pub struct CreatePatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreatePatchBaselineResult { ///

The ID of the created patch baseline.

#[serde(rename = "BaselineId")] @@ -1481,7 +1480,7 @@ pub struct CreateResourceDataSyncRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResourceDataSyncResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1492,7 +1491,7 @@ pub struct DeleteActivationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteActivationResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1512,18 +1511,26 @@ pub struct DeleteAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAssociationResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteDocumentRequest { + ///

The version of the document that you want to delete. If not provided, all versions of the document are deleted.

+ #[serde(rename = "DocumentVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub document_version: Option, ///

The name of the document.

#[serde(rename = "Name")] pub name: String, + ///

The version name of the document that you want to delete. If not provided, all versions of the document are deleted.

+ #[serde(rename = "VersionName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub version_name: Option, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteDocumentResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1546,7 +1553,7 @@ pub struct DeleteInventoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteInventoryResult { ///

Every DeleteInventory action is assigned a unique ID. This option returns a unique ID. You can use this ID to query the status of a delete operation. This option is useful for ensuring that a delete operation has completed before you begin other actions.

#[serde(rename = "DeletionId")] @@ -1570,7 +1577,7 @@ pub struct DeleteMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteMaintenanceWindowResult { ///

The ID of the deleted maintenance window.

#[serde(rename = "WindowId")] @@ -1586,7 +1593,7 @@ pub struct DeleteParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteParameterResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1597,7 +1604,7 @@ pub struct DeleteParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteParametersResult { ///

The names of the deleted parameters.

#[serde(rename = "DeletedParameters")] @@ -1617,7 +1624,7 @@ pub struct DeletePatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePatchBaselineResult { ///

The ID of the deleted patch baseline.

#[serde(rename = "BaselineId")] @@ -1633,7 +1640,7 @@ pub struct DeleteResourceDataSyncRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResourceDataSyncResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1644,7 +1651,7 @@ pub struct DeregisterManagedInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterManagedInstanceResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1658,7 +1665,7 @@ pub struct DeregisterPatchBaselineForPatchGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterPatchBaselineForPatchGroupResult { ///

The ID of the patch baseline the patch group was deregistered from.

#[serde(rename = "BaselineId")] @@ -1685,7 +1692,7 @@ pub struct DeregisterTargetFromMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterTargetFromMaintenanceWindowResult { ///

The ID of the maintenance window the target was removed from.

#[serde(rename = "WindowId")] @@ -1708,7 +1715,7 @@ pub struct DeregisterTaskFromMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterTaskFromMaintenanceWindowResult { ///

The ID of the maintenance window the task was removed from.

#[serde(rename = "WindowId")] @@ -1750,7 +1757,7 @@ pub struct DescribeActivationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeActivationsResult { ///

A list of activations for your AWS account.

#[serde(rename = "ActivationList")] @@ -1785,7 +1792,7 @@ pub struct DescribeAssociationExecutionTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssociationExecutionTargetsResult { ///

Information about the execution.

#[serde(rename = "AssociationExecutionTargets")] @@ -1817,7 +1824,7 @@ pub struct DescribeAssociationExecutionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssociationExecutionsResult { ///

A list of the executions for the specified association ID.

#[serde(rename = "AssociationExecutions")] @@ -1850,7 +1857,7 @@ pub struct DescribeAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAssociationResult { ///

Information about the association.

#[serde(rename = "AssociationDescription")] @@ -1875,7 +1882,7 @@ pub struct DescribeAutomationExecutionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAutomationExecutionsResult { ///

The list of details about each automation execution that has occurred and that matches the filter specification, if any.

#[serde(rename = "AutomationExecutionMetadataList")] @@ -1911,7 +1918,7 @@ pub struct DescribeAutomationStepExecutionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAutomationStepExecutionsResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -1940,7 +1947,7 @@ pub struct DescribeAvailablePatchesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAvailablePatchesResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -1963,7 +1970,7 @@ pub struct DescribeDocumentPermissionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDocumentPermissionResponse { ///

The account IDs that have permission to use this document. The ID can be either an AWS account or All.

#[serde(rename = "AccountIds")] @@ -1987,7 +1994,7 @@ pub struct DescribeDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDocumentResult { ///

Information about the Systems Manager document.

#[serde(rename = "Document")] @@ -2011,7 +2018,7 @@ pub struct DescribeEffectiveInstanceAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEffectiveInstanceAssociationsResult { ///

The associations for the requested instance.

#[serde(rename = "Associations")] @@ -2039,7 +2046,7 @@ pub struct DescribeEffectivePatchesForPatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeEffectivePatchesForPatchBaselineResult { ///

An array of patches and patch status.

#[serde(rename = "EffectivePatches")] @@ -2067,7 +2074,7 @@ pub struct DescribeInstanceAssociationsStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInstanceAssociationsStatusResult { ///

Status information about the association.

#[serde(rename = "InstanceAssociationStatusInfos")] @@ -2100,7 +2107,7 @@ pub struct DescribeInstanceInformationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInstanceInformationResult { ///

The instance information list.

#[serde(rename = "InstanceInformationList")] @@ -2132,7 +2139,7 @@ pub struct DescribeInstancePatchStatesForPatchGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInstancePatchStatesForPatchGroupResult { ///

The high-level patch state for the requested instances.

#[serde(rename = "InstancePatchStates")] @@ -2160,7 +2167,7 @@ pub struct DescribeInstancePatchStatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInstancePatchStatesResult { ///

The high-level patch state for the requested instances.
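An aside on the recurring change above: `#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]` derives `Serialize` not only under `cfg(test)` but also whenever a `serialize_structs` Cargo feature is enabled, so downstream crates can opt in to serializing these output structs. A minimal sketch of what that opt-in enables; the feature name comes from this diff, while the `serde_json` dependency and versions are assumptions:

// Cargo.toml (assumed): rusoto_ssm = { version = "*", features = ["serialize_structs"] }
//                       serde_json = "1"
use rusoto_ssm::DescribeActivationsResult;

// With the feature enabled, output structs derive Serialize, so a result can
// be dumped as JSON outside of #[cfg(test)] code.
fn dump_result(result: &DescribeActivationsResult) -> serde_json::Result<String> {
    serde_json::to_string_pretty(result)
}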

#[serde(rename = "InstancePatchStates")] @@ -2174,7 +2181,7 @@ pub struct DescribeInstancePatchStatesResult { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeInstancePatchesRequest { - ///

Each entry in the array is a structure containing:

Key (string, between 1 and 128 characters)

Values (array of strings, each string between 1 and 256 characters)

+ ///

An array of structures. Each entry in the array is a structure containing a Key, Value combination. Valid values for Key are Classification | KBId | Severity | State.

#[serde(rename = "Filters")] #[serde(skip_serializing_if = "Option::is_none")] pub filters: Option>, @@ -2192,7 +2199,7 @@ pub struct DescribeInstancePatchesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInstancePatchesResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2221,7 +2228,7 @@ pub struct DescribeInventoryDeletionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeInventoryDeletionsResult { ///

A list of status items for deleted inventory.

#[serde(rename = "InventoryDeletions")] @@ -2256,7 +2263,7 @@ pub struct DescribeMaintenanceWindowExecutionTaskInvocationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowExecutionTaskInvocationsResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2289,7 +2296,7 @@ pub struct DescribeMaintenanceWindowExecutionTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowExecutionTasksResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2321,7 +2328,7 @@ pub struct DescribeMaintenanceWindowExecutionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowExecutionsResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2362,7 +2369,7 @@ pub struct DescribeMaintenanceWindowScheduleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowScheduleResult { ///

The token for the next set of items to return. (You use this token in the next call.)

#[serde(rename = "NextToken")] @@ -2394,7 +2401,7 @@ pub struct DescribeMaintenanceWindowTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowTargetsResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2426,7 +2433,7 @@ pub struct DescribeMaintenanceWindowTasksRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowTasksResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2457,7 +2464,7 @@ pub struct DescribeMaintenanceWindowsForTargetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowsForTargetResult { ///

The token for the next set of items to return. (You use this token in the next call.)

#[serde(rename = "NextToken")] @@ -2486,7 +2493,7 @@ pub struct DescribeMaintenanceWindowsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceWindowsResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2508,14 +2515,14 @@ pub struct DescribeOpsItemsRequest { #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option, - ///

One or more filters to limit the reponse.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title

    Operations: Contains

  • Key: OperationalData

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

+ ///

One or more filters to limit the reponse.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title

    Operations: Contains

  • Key: OperationalData*

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

*If you filter the response by using the OperationalData operator, specify a key-value pair by using the following JSON format: {"key":"key_name","value":"a_value"}

#[serde(rename = "OpsItemFilters")] #[serde(skip_serializing_if = "Option::is_none")] pub ops_item_filters: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOpsItemsResponse { ///

The token for the next set of items to return. Use this token to get the next set of results.
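An aside on the OperationalData footnote above: a sketch of building that filter with the types from this file. The key and instance ID are made-up examples, and the "Equal" operator string is an assumption matching the listed Equals operation:

use rusoto_ssm::{DescribeOpsItemsRequest, OpsItemFilter};

// Build a request that filters OpsItems on an OperationalData key-value pair,
// passed as the documented JSON object {"key":"key_name","value":"a_value"}.
fn operational_data_filter() -> DescribeOpsItemsRequest {
    DescribeOpsItemsRequest {
        ops_item_filters: Some(vec![OpsItemFilter {
            key: "OperationalData".to_owned(),
            operator: "Equal".to_owned(),
            values: vec![r#"{"key":"failedResource","value":"i-0123456789abcdef0"}"#.to_owned()],
        }]),
        ..Default::default()
    }
}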

#[serde(rename = "NextToken")] @@ -2548,7 +2555,7 @@ pub struct DescribeParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeParametersResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -2577,7 +2584,7 @@ pub struct DescribePatchBaselinesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePatchBaselinesResult { ///

An array of PatchBaselineIdentity elements.

#[serde(rename = "BaselineIdentities")] @@ -2597,7 +2604,7 @@ pub struct DescribePatchGroupStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePatchGroupStateResult { ///

The number of instances in the patch group.

#[serde(rename = "Instances")] @@ -2650,7 +2657,7 @@ pub struct DescribePatchGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePatchGroupsResult { ///

Each entry in the array contains:

PatchGroup: string (between 1 and 256 characters, Regex: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$)

PatchBaselineIdentity: A PatchBaselineIdentity element.

#[serde(rename = "Mappings")] @@ -2685,7 +2692,7 @@ pub struct DescribePatchPropertiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribePatchPropertiesResult { ///

The token for the next set of items to return. (You use this token in the next call.)

#[serde(rename = "NextToken")] @@ -2717,7 +2724,7 @@ pub struct DescribeSessionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSessionsResponse { ///

The token for the next set of items to return. (You received this token from a previous call.)

#[serde(rename = "NextToken")] @@ -2731,7 +2738,7 @@ pub struct DescribeSessionsResponse { ///

A default version of a document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentDefaultVersionDescription { ///

The default version of the document.

#[serde(rename = "DefaultVersion")] @@ -2749,7 +2756,7 @@ pub struct DocumentDefaultVersionDescription { ///

Describes a Systems Manager document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentDescription { ///

Details about the document attachments, including names, locations, sizes, etc.

#[serde(rename = "AttachmentsInformation")] @@ -2850,7 +2857,7 @@ pub struct DocumentFilter { ///

Describes the name of a Systems Manager document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentIdentifier { ///

The document format, either JSON or YAML.

#[serde(rename = "DocumentFormat")] @@ -2909,7 +2916,7 @@ pub struct DocumentKeyValuesFilter { ///

Parameters specified in a System Manager document that run on the server when the command is run.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentParameter { ///

If specified, the default values for the parameters. Parameters without a default value are required. Parameters with a default value are optional.

#[serde(rename = "DefaultValue")] @@ -2931,7 +2938,7 @@ pub struct DocumentParameter { ///

Version information about the document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentVersionInfo { ///

The date the document was created.

#[serde(rename = "CreatedDate")] @@ -2969,7 +2976,7 @@ pub struct DocumentVersionInfo { ///

The EffectivePatch structure defines metadata about a patch along with the approval state of the patch in a particular patch baseline. The approval state includes information about whether the patch is currently approved, due to be approved by a rule, explicitly approved, or explicitly rejected and the date the patch was or will be approved.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EffectivePatch { ///

Provides metadata for a patch, including information such as the KB ID, severity, classification and a URL for where more information can be obtained about the patch.

#[serde(rename = "Patch")] @@ -2983,7 +2990,7 @@ pub struct EffectivePatch { ///

Describes a failed association.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedCreateAssociation { ///

The association.

#[serde(rename = "Entry")] @@ -3001,7 +3008,7 @@ pub struct FailedCreateAssociation { ///

Information about an Automation failure.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailureDetails { ///

Detailed information about the Automation step failure.

#[serde(rename = "Details")] @@ -3025,7 +3032,7 @@ pub struct GetAutomationExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetAutomationExecutionResult { ///

Detailed information about the current state of an automation execution.

#[serde(rename = "AutomationExecution")] @@ -3048,7 +3055,7 @@ pub struct GetCommandInvocationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCommandInvocationResult { ///

CloudWatch Logs information where Systems Manager sent the command output.

#[serde(rename = "CloudWatchOutputConfig")] @@ -3128,7 +3135,7 @@ pub struct GetConnectionStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetConnectionStatusResponse { ///

The status of the connection to the instance. For example, 'Connected' or 'Not Connected'.

#[serde(rename = "Status")] @@ -3149,7 +3156,7 @@ pub struct GetDefaultPatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDefaultPatchBaselineResult { ///

The ID of the default patch baseline.

#[serde(rename = "BaselineId")] @@ -3172,7 +3179,7 @@ pub struct GetDeployablePatchSnapshotForInstanceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDeployablePatchSnapshotForInstanceResult { ///

The ID of the instance.

#[serde(rename = "InstanceId")] @@ -3212,7 +3219,7 @@ pub struct GetDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDocumentResult { ///

A description of the document attachments, including names, locations, sizes, etc.

#[serde(rename = "AttachmentsContent")] @@ -3277,7 +3284,7 @@ pub struct GetInventoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInventoryResult { ///

Collection of inventory entities such as a collection of instance inventory.

#[serde(rename = "Entities")] @@ -3314,7 +3321,7 @@ pub struct GetInventorySchemaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetInventorySchemaResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -3334,7 +3341,7 @@ pub struct GetMaintenanceWindowExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMaintenanceWindowExecutionResult { ///

The time the maintenance window finished running.

#[serde(rename = "EndTime")] @@ -3376,7 +3383,7 @@ pub struct GetMaintenanceWindowExecutionTaskInvocationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMaintenanceWindowExecutionTaskInvocationResult { ///

The time that the task finished running on the target.

#[serde(rename = "EndTime")] @@ -3414,7 +3421,7 @@ pub struct GetMaintenanceWindowExecutionTaskInvocationResult { #[serde(rename = "TaskExecutionId")] #[serde(skip_serializing_if = "Option::is_none")] pub task_execution_id: Option, - ///

Retrieves the task type for a maintenance window. Task types include the following: LAMBDA, STEP_FUNCTION, AUTOMATION, RUN_COMMAND.

+ ///

Retrieves the task type for a maintenance window. Task types include the following: LAMBDA, STEP_FUNCTIONS, AUTOMATION, RUN_COMMAND.

#[serde(rename = "TaskType")] #[serde(skip_serializing_if = "Option::is_none")] pub task_type: Option, @@ -3439,7 +3446,7 @@ pub struct GetMaintenanceWindowExecutionTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMaintenanceWindowExecutionTaskResult { ///

The time the task execution completed.

#[serde(rename = "EndTime")] @@ -3505,7 +3512,7 @@ pub struct GetMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMaintenanceWindowResult { ///

Whether targets must be registered with the maintenance window before tasks can be defined for those targets.

#[serde(rename = "AllowUnassociatedTargets")] @@ -3576,7 +3583,7 @@ pub struct GetMaintenanceWindowTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMaintenanceWindowTaskResult { ///

The retrieved task description.

#[serde(rename = "Description")] @@ -3610,7 +3617,7 @@ pub struct GetMaintenanceWindowTaskResult { #[serde(rename = "Targets")] #[serde(skip_serializing_if = "Option::is_none")] pub targets: Option>, - ///

The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value is the state machine ARN.

+ ///

The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTIONS tasks, the value is the state machine ARN.

#[serde(rename = "TaskArn")] #[serde(skip_serializing_if = "Option::is_none")] pub task_arn: Option, @@ -3645,7 +3652,7 @@ pub struct GetOpsItemRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOpsItemResponse { ///

The OpsItem.

#[serde(rename = "OpsItem")] @@ -3673,7 +3680,7 @@ pub struct GetOpsSummaryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetOpsSummaryResult { ///

The list of aggregated and filtered OpsItems.

#[serde(rename = "Entities")] @@ -3705,7 +3712,7 @@ pub struct GetParameterHistoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetParameterHistoryResult { ///

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

#[serde(rename = "NextToken")] @@ -3729,7 +3736,7 @@ pub struct GetParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetParameterResult { ///

Information about a parameter.

#[serde(rename = "Parameter")] @@ -3765,7 +3772,7 @@ pub struct GetParametersByPathRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetParametersByPathResult { ///

The token for the next set of items to return. Use this token to get the next set of results.
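An aside on the NextToken fields that recur throughout these results: they all follow the same pagination contract, where an absent or empty token means the listing is done. A sketch under the assumption that this crate version exposes rusoto's blocking `.sync()` call:

use rusoto_ssm::{GetParametersByPathRequest, Parameter, Ssm, SsmClient};

// Page through GetParametersByPath until no further token is returned.
fn all_parameters(
    client: &SsmClient,
    path: &str,
) -> Result<Vec<Parameter>, Box<dyn std::error::Error>> {
    let mut params = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let result = client
            .get_parameters_by_path(GetParametersByPathRequest {
                path: path.to_owned(),
                next_token: next_token.take(),
                ..Default::default()
            })
            .sync()?;
        params.extend(result.parameters.unwrap_or_default());
        match result.next_token {
            // Treat a missing or empty token as the end of the listing.
            Some(token) if !token.is_empty() => next_token = Some(token),
            _ => return Ok(params),
        }
    }
}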

#[serde(rename = "NextToken")] @@ -3789,7 +3796,7 @@ pub struct GetParametersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetParametersResult { ///

A list of parameters that are not formatted correctly or do not run during an execution.

#[serde(rename = "InvalidParameters")] @@ -3813,7 +3820,7 @@ pub struct GetPatchBaselineForPatchGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPatchBaselineForPatchGroupResult { ///

The ID of the patch baseline that should be used for the patch group.

#[serde(rename = "BaselineId")] @@ -3837,7 +3844,7 @@ pub struct GetPatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPatchBaselineResult { ///

A set of rules used to include patches in the baseline.

#[serde(rename = "ApprovalRules")] @@ -3911,7 +3918,7 @@ pub struct GetServiceSettingRequest { ///

The query result body of the GetServiceSetting API action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetServiceSettingResult { ///

The query result of the current service setting.

#[serde(rename = "ServiceSetting")] @@ -3921,7 +3928,7 @@ pub struct GetServiceSettingResult { ///

Status information about the aggregated associations.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceAggregatedAssociationOverview { ///

Detailed status information about the aggregated associations.

#[serde(rename = "DetailedStatus")] @@ -3936,7 +3943,7 @@ pub struct InstanceAggregatedAssociationOverview { ///

One or more association documents on the instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceAssociation { ///

The association ID.

#[serde(rename = "AssociationId")] @@ -3967,7 +3974,7 @@ pub struct InstanceAssociationOutputLocation { ///

The URL of Amazon S3 bucket where you want to store the results of this request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceAssociationOutputUrl { ///

The URL of Amazon S3 bucket where you want to store the results of this request.

#[serde(rename = "S3OutputUrl")] @@ -3977,7 +3984,7 @@ pub struct InstanceAssociationOutputUrl { ///

Status information about the instance association.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceAssociationStatusInfo { ///

The association ID.

#[serde(rename = "AssociationId")] @@ -4031,7 +4038,7 @@ pub struct InstanceAssociationStatusInfo { ///

Describes a filter for a specific list of instances.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceInformation { ///

The activation ID created by Systems Manager when the server or VM was registered.

#[serde(rename = "ActivationId")] @@ -4135,7 +4142,7 @@ pub struct InstanceInformationStringFilter { ///

Defines the high-level patch compliance state for a managed instance, providing information about the number of installed, missing, not applicable, and failed patches along with metadata about the operation when this information was gathered for the instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstancePatchState { ///

The ID of the patch baseline used to patch the instance.

#[serde(rename = "BaselineId")] @@ -4230,7 +4237,7 @@ pub struct InventoryAggregator { ///

Status information returned by the DeleteInventory action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryDeletionStatusItem { ///

The deletion ID returned by the DeleteInventory action.

#[serde(rename = "DeletionId")] @@ -4264,7 +4271,7 @@ pub struct InventoryDeletionStatusItem { ///

Information about the delete operation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryDeletionSummary { ///

Remaining number of items to delete.

#[serde(rename = "RemainingCount")] @@ -4282,7 +4289,7 @@ pub struct InventoryDeletionSummary { ///

Either a count, remaining count, or a version number in a delete inventory summary.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryDeletionSummaryItem { ///

A count of the number of deleted items.

#[serde(rename = "Count")] @@ -4352,7 +4359,7 @@ pub struct InventoryItem { ///

Attributes are the entries within the inventory item content. It contains name and value.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryItemAttribute { ///

The data type of the inventory item attribute.

#[serde(rename = "DataType")] @@ -4364,7 +4371,7 @@ pub struct InventoryItemAttribute { ///

The inventory item schema definition. Users can use this to compose inventory query filters.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryItemSchema { ///

The schema attributes for inventory. This contains data type and attribute name.

#[serde(rename = "Attributes")] @@ -4384,7 +4391,7 @@ pub struct InventoryItemSchema { ///

Inventory query results.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryResultEntity { ///

The data section in the inventory result entity JSON.

#[serde(rename = "Data")] @@ -4398,7 +4405,7 @@ pub struct InventoryResultEntity { ///

The inventory result item.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InventoryResultItem { ///

The time inventory item data was captured.

#[serde(rename = "CaptureTime")] @@ -4434,7 +4441,7 @@ pub struct LabelParameterVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LabelParameterVersionResult { ///

The label does not meet the requirements. For information about parameter label requirements, see Labeling Parameters in the AWS Systems Manager User Guide.

#[serde(rename = "InvalidLabels")] @@ -4458,7 +4465,7 @@ pub struct ListAssociationVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssociationVersionsResult { ///

Information about all versions of the association for the specified association ID.

#[serde(rename = "AssociationVersions")] @@ -4487,7 +4494,7 @@ pub struct ListAssociationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAssociationsResult { ///

The associations.

#[serde(rename = "Associations")] @@ -4528,7 +4535,7 @@ pub struct ListCommandInvocationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCommandInvocationsResult { ///

(Optional) A list of all invocations.

#[serde(rename = "CommandInvocations")] @@ -4565,7 +4572,7 @@ pub struct ListCommandsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListCommandsResult { ///

(Optional) The list of commands requested by the user.

#[serde(rename = "Commands")] @@ -4602,7 +4609,7 @@ pub struct ListComplianceItemsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListComplianceItemsResult { ///

A list of compliance information for the specified resource ID.

#[serde(rename = "ComplianceItems")] @@ -4631,7 +4638,7 @@ pub struct ListComplianceSummariesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListComplianceSummariesResult { ///

A list of compliant and non-compliant summary counts based on compliance types. For example, this call returns State Manager associations, patches, or custom compliance types according to the filter criteria that you specified.

#[serde(rename = "ComplianceSummaryItems")] @@ -4659,7 +4666,7 @@ pub struct ListDocumentVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDocumentVersionsResult { ///

The document versions.

#[serde(rename = "DocumentVersions")] @@ -4692,7 +4699,7 @@ pub struct ListDocumentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDocumentsResult { ///

The names of the Systems Manager documents.

#[serde(rename = "DocumentIdentifiers")] @@ -4727,7 +4734,7 @@ pub struct ListInventoryEntriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListInventoryEntriesResult { ///

The time that inventory information was collected for the instance(s).

#[serde(rename = "CaptureTime")] @@ -4772,7 +4779,7 @@ pub struct ListResourceComplianceSummariesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceComplianceSummariesResult { ///

The token for the next set of items to return. Use this token to get the next set of results.

#[serde(rename = "NextToken")] @@ -4797,7 +4804,7 @@ pub struct ListResourceDataSyncRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceDataSyncResult { ///

The token for the next set of items to return. Use this token to get the next set of results.

#[serde(rename = "NextToken")] @@ -4820,7 +4827,7 @@ pub struct ListTagsForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceResult { ///

A list of tags.

#[serde(rename = "TagList")] @@ -4858,7 +4865,7 @@ pub struct MaintenanceWindowAutomationParameters { ///

Describes the information about an execution of a maintenance window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowExecution { ///

The time the execution finished.

#[serde(rename = "EndTime")] @@ -4888,7 +4895,7 @@ pub struct MaintenanceWindowExecution { ///

Information about a task execution performed as part of a maintenance window execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowExecutionTaskIdentity { ///

The time the task execution finished.

#[serde(rename = "EndTime")] @@ -4926,7 +4933,7 @@ pub struct MaintenanceWindowExecutionTaskIdentity { ///

Describes the information about a task invocation for a particular target as part of a task execution performed as part of a maintenance window execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowExecutionTaskInvocationIdentity { ///

The time the invocation finished.

#[serde(rename = "EndTime")] @@ -4993,7 +5000,7 @@ pub struct MaintenanceWindowFilter { ///

Information about the maintenance window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowIdentity { ///

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

#[serde(rename = "Cutoff")] @@ -5043,7 +5050,7 @@ pub struct MaintenanceWindowIdentity { ///

The maintenance window to which the specified target belongs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowIdentityForTarget { ///

The name of the maintenance window.

#[serde(rename = "Name")] @@ -5118,14 +5125,14 @@ pub struct MaintenanceWindowRunCommandParameters { pub timeout_seconds: Option, } -///

The parameters for a STEP_FUNCTION task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Step Functions tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

+///

The parameters for a STEP_FUNCTIONS task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Step Functions tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindowStepFunctionsParameters { - ///

The inputs for the STEP_FUNCTION task.

+ ///

The inputs for the STEP_FUNCTIONS task.

#[serde(rename = "Input")] #[serde(skip_serializing_if = "Option::is_none")] pub input: Option, - ///

The name of the STEP_FUNCTION task.

+ ///

The name of the STEP_FUNCTIONS task.

#[serde(rename = "Name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, @@ -5133,7 +5140,7 @@ pub struct MaintenanceWindowStepFunctionsParameters { ///

The target registered with the maintenance window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowTarget { ///

A description for the target.
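An aside: a sketch of wiring the STEP_FUNCTIONS parameters defined above into a maintenance window task's invocation parameters. The state machine name and input payload are made-up examples:

use rusoto_ssm::{
    MaintenanceWindowStepFunctionsParameters, MaintenanceWindowTaskInvocationParameters,
};

// Per the doc comments above, a STEP_FUNCTIONS task takes an optional JSON
// input payload and an optional name.
fn step_functions_invocation() -> MaintenanceWindowTaskInvocationParameters {
    MaintenanceWindowTaskInvocationParameters {
        step_functions: Some(MaintenanceWindowStepFunctionsParameters {
            input: Some(r#"{"instanceId":"i-0123456789abcdef0"}"#.to_owned()),
            name: Some("example-state-machine".to_owned()),
        }),
        ..Default::default()
    }
}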

#[serde(rename = "Description")] @@ -5167,7 +5174,7 @@ pub struct MaintenanceWindowTarget { ///

Information about a task defined for a maintenance window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct MaintenanceWindowTask { ///

A description of the task.

#[serde(rename = "Description")] @@ -5201,7 +5208,7 @@ pub struct MaintenanceWindowTask { #[serde(rename = "Targets")] #[serde(skip_serializing_if = "Option::is_none")] pub targets: Option>, - ///

The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, TaskArn is the Systems Manager document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTION tasks, it's the state machine ARN.

+ ///

The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, TaskArn is the Systems Manager document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTIONS tasks, it's the state machine ARN.

#[serde(rename = "TaskArn")] #[serde(skip_serializing_if = "Option::is_none")] pub task_arn: Option, @@ -5210,7 +5217,7 @@ pub struct MaintenanceWindowTask { #[serde(skip_serializing_if = "Option::is_none")] pub task_parameters: Option<::std::collections::HashMap>, - ///

The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, LAMBDA, or STEP_FUNCTION.

+ ///

The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, LAMBDA, or STEP_FUNCTIONS.

#[serde(rename = "Type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option, @@ -5239,7 +5246,7 @@ pub struct MaintenanceWindowTaskInvocationParameters { #[serde(rename = "RunCommand")] #[serde(skip_serializing_if = "Option::is_none")] pub run_command: Option, - ///

The parameters for a STEP_FUNCTION task type.

+ ///

The parameters for a STEP_FUNCTIONS task type.

#[serde(rename = "StepFunctions")] #[serde(skip_serializing_if = "Option::is_none")] pub step_functions: Option, @@ -5273,12 +5280,12 @@ pub struct ModifyDocumentPermissionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyDocumentPermissionResponse {} ///

A summary of resources that are not compliant. The summary is organized according to resource type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct NonCompliantSummary { ///

The total number of compliance items that are not compliant.

#[serde(rename = "NonCompliantCount")] @@ -5338,7 +5345,7 @@ pub struct OpsAggregator { ///

The result of the query.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OpsEntity { ///

The data returned by the query.

#[serde(rename = "Data")] @@ -5352,7 +5359,7 @@ pub struct OpsEntity { ///

The OpsItem summaries result item.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OpsEntityItem { ///

The detailed data content for an OpsItem summaries result item.

#[serde(rename = "Content")] @@ -5375,9 +5382,9 @@ pub struct OpsFilter { pub values: Vec, } -///

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+///

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OpsItem { ///

The ARN of the AWS account that created the OpsItem.

#[serde(rename = "CreatedBy")] @@ -5403,7 +5410,7 @@ pub struct OpsItem { #[serde(rename = "Notifications")] #[serde(skip_serializing_if = "Option::is_none")] pub notifications: Option>, - ///

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

This custom data is searchable, but with restrictions. For the Searchable operational data feature, all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. For the Private operational data feature, the data is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

+ ///

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.
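An aside: a sketch of the OperationalData shape described above, using the documented /aws/resources key. The ARN is a made-up example, and "SearchableString" assumes the documented searchable value type:

use std::collections::HashMap;
use rusoto_ssm::OpsItemDataValue;

// Operational data keys may not begin with the reserved prefixes listed above;
// "/aws/resources" is the documented key for linking a related resource.
fn related_resource_data() -> HashMap<String, OpsItemDataValue> {
    let mut operational_data = HashMap::new();
    operational_data.insert(
        "/aws/resources".to_owned(),
        OpsItemDataValue {
            type_: Some("SearchableString".to_owned()),
            value: Some(
                r#"[{"arn":"arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0"}]"#
                    .to_owned(),
            ),
        },
    );
    operational_data
}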

#[serde(rename = "OperationalData")] #[serde(skip_serializing_if = "Option::is_none")] pub operational_data: Option<::std::collections::HashMap>, @@ -5415,7 +5422,7 @@ pub struct OpsItem { #[serde(rename = "Priority")] #[serde(skip_serializing_if = "Option::is_none")] pub priority: Option, - ///

One or more OpsItems that share something in common with the current OpsItems. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource.

+ ///

One or more OpsItems that share something in common with the current OpsItem. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource.

#[serde(rename = "RelatedOpsItems")] #[serde(skip_serializing_if = "Option::is_none")] pub related_ops_items: Option>, @@ -5423,7 +5430,7 @@ pub struct OpsItem { #[serde(rename = "Source")] #[serde(skip_serializing_if = "Option::is_none")] pub source: Option, - ///

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

+ ///

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

#[serde(rename = "Status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, @@ -5450,7 +5457,7 @@ pub struct OpsItemDataValue { pub value: Option, } -///

Describes an OpsCenter filter.

+///

Describes an OpsItem filter.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct OpsItemFilter { ///

The name of the filter.

@@ -5475,7 +5482,7 @@ pub struct OpsItemNotification { ///

A count of OpsItems.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OpsItemSummary { ///

The Amazon Resource Name (ARN) of the IAM entity that created the OpsItem.

#[serde(rename = "CreatedBy")] @@ -5493,7 +5500,7 @@ pub struct OpsItemSummary { #[serde(rename = "LastModifiedTime")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_time: Option, - ///

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

This custom data is searchable, but with restrictions. For the Searchable operational data feature, all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. For the Private operational data feature, the data is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

+ ///

Operational data is custom data that provides useful reference details about the OpsItem.

#[serde(rename = "OperationalData")] #[serde(skip_serializing_if = "Option::is_none")] pub operational_data: Option<::std::collections::HashMap>, @@ -5521,7 +5528,7 @@ pub struct OpsItemSummary { ///

Information about the source where the association execution details are stored.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OutputSource { ///

The ID of the output source, for example the URL of an Amazon S3 bucket.

#[serde(rename = "OutputSourceId")] @@ -5535,7 +5542,7 @@ pub struct OutputSource { ///

An Amazon EC2 Systems Manager parameter in Parameter Store.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Parameter {
     /// The Amazon Resource Name (ARN) of the parameter.
     #[serde(rename = "ARN")]
@@ -5573,7 +5580,7 @@ pub struct Parameter {
 /// Information about parameter usage.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ParameterHistory {
     /// Parameter names can include the following letters and symbols: a-zA-Z0-9_.-
     #[serde(rename = "AllowedPattern")]
@@ -5627,7 +5634,7 @@ pub struct ParameterHistory {
 /// One or more policies assigned to a parameter.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ParameterInlinePolicy {
     /// The status of the policy. Policies report the following statuses: Pending (the policy has not been enforced or applied yet), Finished (the policy was applied), Failed (the policy was not applied), or InProgress (the policy is being applied now).
     #[serde(rename = "PolicyStatus")]
@@ -5645,7 +5652,7 @@ pub struct ParameterInlinePolicy {
 /// Metadata includes information like the ARN of the last user and the date/time the parameter was last used.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ParameterMetadata {
     /// A parameter name can include only the following letters and symbols: a-zA-Z0-9_.-
     #[serde(rename = "AllowedPattern")]
@@ -5718,7 +5725,7 @@ pub struct ParametersFilter {
 /// Represents metadata about a patch.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Patch {
     /// The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates).
#[serde(rename = "Classification")] @@ -5776,7 +5783,7 @@ pub struct Patch { ///

Defines the basic information about a patch baseline.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PatchBaselineIdentity { ///

The description of the patch baseline.

#[serde(rename = "BaselineDescription")] @@ -5802,7 +5809,7 @@ pub struct PatchBaselineIdentity { ///

Information about the state of a patch on a particular instance as it relates to the patch baseline used to patch the instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PatchComplianceData { ///

The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates).

#[serde(rename = "Classification")] @@ -5845,7 +5852,7 @@ pub struct PatchFilterGroup { ///

The mapping between a patch group and the patch baseline the patch group is registered with.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PatchGroupPatchBaselineMapping { ///

The patch baseline the patch group is registered with.

#[serde(rename = "BaselineIdentity")] @@ -5913,7 +5920,7 @@ pub struct PatchSource { ///

Information about the approval status of a patch.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PatchStatus { ///

The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).

#[serde(rename = "ApprovalDate")] @@ -5931,7 +5938,7 @@ pub struct PatchStatus { ///

An aggregate of step execution statuses displayed in the AWS Console for a multi-Region and multi-account Automation execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ProgressCounters { ///

The total number of steps that the system cancelled in all specified AWS Regions and accounts for the current Automation execution.

#[serde(rename = "CancelledSteps")] @@ -5979,7 +5986,7 @@ pub struct PutComplianceItemsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutComplianceItemsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -5993,7 +6000,7 @@ pub struct PutInventoryRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutInventoryResult { ///

Information about the request.

#[serde(rename = "Message")] @@ -6030,7 +6037,7 @@ pub struct PutParameterRequest { #[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, - ///

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a value limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters per account and per Region. Standard parameters are offered at no additional cost.

Advanced parameters have a value limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters per account and per Region. Advanced parameters incur a charge.

If you don't specify a parameter tier when you create a new parameter, the parameter defaults to using the standard tier. You can change a standard parameter to an advanced parameter at any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter. For more information, see About Advanced Parameters in the AWS Systems Manager User Guide.

+ ///

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an AWS account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an AWS account. Advanced parameters incur a charge. For more information, see About Advanced Parameters in the AWS Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you do not specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

  • Advanced: With this option, Parameter Store evaluates all requests as advanced parameters.

  • Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced.

    If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store creates a parameter in the advanced-parameter tier.

    This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary.

Options that require an advanced parameter include the following:

  • The content size of the parameter is more than 4 KB.

  • The parameter uses a parameter policy.

  • More than 10,000 parameters already exist in your AWS account in the current Region.

For more information about configuring the default tier option, see Specifying a Default Parameter Tier in the AWS Systems Manager User Guide.

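To make the tier rules above concrete, here is a hedged sketch of a `PutParameter` call that explicitly requests the advanced tier; the client setup is assumed, the parameter name and value are placeholders, and rusoto's pre-async `RusotoFuture::sync()` API is used:

```rust
use rusoto_ssm::{PutParameterRequest, Ssm, SsmClient};

fn put_advanced_parameter(client: &SsmClient) {
    let request = PutParameterRequest {
        name: "/prod/db/password".to_string(), // placeholder name
        value: "s3cr3t-value".to_string(),     // placeholder value
        type_: "SecureString".to_string(),
        // Omitting `tier` falls back to the account's default tier
        // configuration (Standard, Advanced, or Intelligent-Tiering).
        tier: Some("Advanced".to_string()),
        ..Default::default()
    };
    match client.put_parameter(request).sync() {
        Ok(result) => println!("new parameter version: {:?}", result.version),
        Err(e) => eprintln!("PutParameter failed: {}", e),
    }
}
```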
#[serde(rename = "Tier")] #[serde(skip_serializing_if = "Option::is_none")] pub tier: Option, @@ -6043,7 +6050,7 @@ pub struct PutParameterRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutParameterResult { ///

The new version number of a parameter. If you edit a parameter value, Parameter Store automatically creates a new version and assigns this new version a unique ID. You can reference a parameter version ID in API actions or in Systems Manager documents (SSM documents). By default, if you don't specify a specific version, the system returns the latest parameter value when a parameter is called.

#[serde(rename = "Version")] @@ -6059,7 +6066,7 @@ pub struct RegisterDefaultPatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterDefaultPatchBaselineResult { ///

The ID of the default patch baseline.

#[serde(rename = "BaselineId")] @@ -6078,7 +6085,7 @@ pub struct RegisterPatchBaselineForPatchGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterPatchBaselineForPatchGroupResult { ///

The ID of the patch baseline the patch group was registered with.

#[serde(rename = "BaselineId")] @@ -6111,7 +6118,7 @@ pub struct RegisterTargetWithMaintenanceWindowRequest { ///

The type of target being registered with the maintenance window.

#[serde(rename = "ResourceType")] pub resource_type: String, - ///

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using either instance IDs or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

For more information about these examples formats, including the best use case for each one, see Examples: Register Targets with a Maintenance Window in the AWS Systems Manager User Guide.

+ ///

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these example formats, including the best use case for each one, see Examples: Register Targets with a Maintenance Window in the AWS Systems Manager User Guide.

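As an illustration, "Example 4" above maps onto the `Target` type from this crate roughly as follows (a sketch; the surrounding `RegisterTargetWithMaintenanceWindowRequest` and the group name are assumed):

```rust
use rusoto_ssm::Target;

/// Build a maintenance window target that selects a resource group by name.
fn resource_group_target(group_name: &str) -> Target {
    Target {
        key: Some("resource-groups:Name".to_string()),
        values: Some(vec![group_name.to_string()]),
    }
}
```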
#[serde(rename = "Targets")] pub targets: Vec, ///

The ID of the maintenance window the target should be registered with.

@@ -6120,7 +6127,7 @@ pub struct RegisterTargetWithMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterTargetWithMaintenanceWindowResult { ///

The ID of the target definition in this maintenance window.

#[serde(rename = "WindowTargetId")] @@ -6160,7 +6167,7 @@ pub struct RegisterTaskWithMaintenanceWindowRequest { #[serde(rename = "ServiceRoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub service_role_arn: Option, - ///

The targets (either instances or maintenance window targets).

Specify instances using the following format:

Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>

Specify maintenance window targets using the following format:

Key=<WindowTargetIds>,Values=<window-target-id-1>,<window-target-id-2>

+ ///

The targets (either instances or maintenance window targets).

Specify instances using the following format:

Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>

Specify maintenance window targets using the following format:

Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>

#[serde(rename = "Targets")] pub targets: Vec, ///

The ARN of the task to run.

@@ -6184,7 +6191,7 @@ pub struct RegisterTaskWithMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterTaskWithMaintenanceWindowResult { ///

The ID of the task in the maintenance window.

#[serde(rename = "WindowTaskId")] @@ -6192,7 +6199,7 @@ pub struct RegisterTaskWithMaintenanceWindowResult { pub window_task_id: Option, } -///

An OpsItems that shares something in common with the current OpsItems. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource.

+///

An OpsItem that shares something in common with the current OpsItem. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RelatedOpsItem { ///

The ID of an OpsItem related to the current OpsItem.

@@ -6202,10 +6209,10 @@ pub struct RelatedOpsItem { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RemoveTagsFromResourceRequest { - ///

The resource ID for which you want to remove tags. Use the ID of the resource. Here are some examples:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

+ ///

The ID of the resource from which you want to remove tags. For example:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

#[serde(rename = "ResourceId")] pub resource_id: String, - ///

The type of resource of which you want to remove a tag.

The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

+ ///

The type of resource from which you want to remove a tag.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

#[serde(rename = "ResourceType")] pub resource_type: String, ///

Tag keys that you want to remove from the specified resource.

@@ -6214,7 +6221,7 @@ pub struct RemoveTagsFromResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RemoveTagsFromResourceResult {} ///

The request body of the ResetServiceSetting API action.

@@ -6227,7 +6234,7 @@ pub struct ResetServiceSettingRequest { ///

The result body of the ResetServiceSetting API action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResetServiceSettingResult { ///

The current, effective service setting after calling the ResetServiceSetting API action.

#[serde(rename = "ServiceSetting")] @@ -6237,7 +6244,7 @@ pub struct ResetServiceSettingResult { ///

Information about targets that resolved during the Automation execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResolvedTargets { ///

A list of parameter values sent to targets that resolved during the Automation execution.

#[serde(rename = "ParameterValues")] @@ -6251,7 +6258,7 @@ pub struct ResolvedTargets { ///

Compliance summary information for a specific resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceComplianceSummaryItem { ///

The compliance type.

#[serde(rename = "ComplianceType")] @@ -6289,7 +6296,7 @@ pub struct ResourceComplianceSummaryItem { ///

Information about a Resource Data Sync configuration, including its current status and last successful sync.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceDataSyncItem { ///

The status reported by the last sync.

#[serde(rename = "LastStatus")] @@ -6359,13 +6366,13 @@ pub struct ResumeSessionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResumeSessionResponse { ///

The ID of the session.

#[serde(rename = "SessionId")] #[serde(skip_serializing_if = "Option::is_none")] pub session_id: Option, - ///

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

+ ///

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

#[serde(rename = "StreamUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub stream_url: Option, @@ -6394,7 +6401,7 @@ pub struct S3OutputLocation { ///

A URL for the Amazon S3 bucket where you want to store the results of this request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct S3OutputUrl { ///

A URL for an Amazon S3 bucket where you want to store the results of this request.

#[serde(rename = "OutputUrl")] @@ -6404,7 +6411,7 @@ pub struct S3OutputUrl { ///

Information about a scheduled execution for a maintenance window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ScheduledWindowExecution { ///

The time, in ISO-8601 Extended format, that the maintenance window is scheduled to be run.

#[serde(rename = "ExecutionTime")] @@ -6435,7 +6442,7 @@ pub struct SendAutomationSignalRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendAutomationSignalResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6510,7 +6517,7 @@ pub struct SendCommandRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendCommandResult { ///

The request as it was received by Systems Manager. Also provides the command ID which can be used for future references to this request.

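Because the echoed `Command` carries the command ID needed for follow-up calls such as `GetCommandInvocation`, a minimal sketch of capturing it (client construction assumed; the `uptime` command is a placeholder):

```rust
use std::collections::HashMap;

use rusoto_ssm::{SendCommandRequest, Ssm, SsmClient};

fn run_and_capture_id(client: &SsmClient, instance_id: &str) -> Option<String> {
    let mut parameters = HashMap::new();
    parameters.insert("commands".to_string(), vec!["uptime".to_string()]);
    let request = SendCommandRequest {
        document_name: "AWS-RunShellScript".to_string(),
        instance_ids: Some(vec![instance_id.to_string()]),
        parameters: Some(parameters),
        ..Default::default()
    };
    // Keep the command ID for later status lookups.
    client
        .send_command(request)
        .sync()
        .ok()
        .and_then(|result| result.command)
        .and_then(|command| command.command_id)
}
```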
#[serde(rename = "Command")] @@ -6520,7 +6527,7 @@ pub struct SendCommandResult { ///

The service setting data structure.

ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of "false". This means the user can't use this feature unless they change the setting to "true" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or, use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceSetting { ///

The ARN of the service setting.

#[serde(rename = "ARN")] @@ -6550,7 +6557,7 @@ pub struct ServiceSetting { ///

Information about a Session Manager connection to an instance.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Session { ///

Reserved for future use.

#[serde(rename = "Details")] @@ -6603,7 +6610,7 @@ pub struct SessionFilter { ///

Reserved for future use.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SessionManagerOutputUrl { ///

Reserved for future use.

#[serde(rename = "CloudWatchOutputUrl")] @@ -6617,7 +6624,7 @@ pub struct SessionManagerOutputUrl { ///

The number of managed instances found for each patch severity level defined in the request filter.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SeveritySummary { ///

The total number of resources or compliance items that have a severity level of critical. Critical severity is determined by the organization that published the compliance items.

#[serde(rename = "CriticalCount")] @@ -6653,7 +6660,7 @@ pub struct StartAssociationsOnceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartAssociationsOnceResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -6704,7 +6711,7 @@ pub struct StartAutomationExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartAutomationExecutionResult { ///

The unique ID of a newly scheduled automation execution.

#[serde(rename = "AutomationExecutionId")] @@ -6728,13 +6735,13 @@ pub struct StartSessionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartSessionResponse { ///

The ID of the session.

#[serde(rename = "SessionId")] #[serde(skip_serializing_if = "Option::is_none")] pub session_id: Option, - ///

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

+ ///

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

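A short sketch of consuming this response (client setup assumed): the returned `stream_url` follows the `wss://ssmmessages...` format described above and is what the Session Manager plugin connects to, authenticating with `token_value`:

```rust
use rusoto_ssm::{Ssm, SsmClient, StartSessionRequest};

fn open_session(client: &SsmClient, instance_id: &str) {
    let request = StartSessionRequest {
        target: instance_id.to_string(),
        ..Default::default()
    };
    match client.start_session(request).sync() {
        Ok(response) => println!("stream URL: {:?}", response.stream_url),
        Err(e) => eprintln!("StartSession failed: {}", e),
    }
}
```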
#[serde(rename = "StreamUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub stream_url: Option, @@ -6746,7 +6753,7 @@ pub struct StartSessionResponse { ///

Detailed information about the execution state of an Automation step.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StepExecution { ///

The action this step performs. The action determines the behavior of the step.

#[serde(rename = "Action")] @@ -6861,7 +6868,7 @@ pub struct StopAutomationExecutionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopAutomationExecutionResult {} ///

Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines.

@@ -6875,14 +6882,14 @@ pub struct Tag { pub value: String, } -///

An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.

+///

An array of search criteria that targets instances using a Key,Value combination that you specify.

Supported formats include the following.

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

  • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

  • (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For example:

  • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

  • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

  • Key=tag-key,Values=Name,Instance-Type,CostCenter

  • (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.

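The `Key`/`Values` pairing above maps directly onto the struct that follows; for instance, the `tag:ServerRole` example reads as (sketch):

```rust
use rusoto_ssm::Target;

/// Select instances carrying the EC2 tag ServerRole=WebServer.
fn web_server_target() -> Target {
    Target {
        key: Some("tag:ServerRole".to_string()),
        values: Some(vec!["WebServer".to_string()]),
    }
}
```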
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Target { - ///

User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.

+ ///

User-defined criteria for sending commands that target instances that meet the criteria.

#[serde(rename = "Key")] #[serde(skip_serializing_if = "Option::is_none")] pub key: Option, - ///

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.

+ ///

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include Amazon EC2 tags of ServerRole,WebServer.

#[serde(rename = "Values")] #[serde(skip_serializing_if = "Option::is_none")] pub values: Option>, @@ -6921,7 +6928,7 @@ pub struct TerminateSessionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminateSessionResponse { ///

The ID of the session that has been terminated.

#[serde(rename = "SessionId")] @@ -6985,7 +6992,7 @@ pub struct UpdateAssociationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAssociationResult { ///

The description of the association that was updated.

#[serde(rename = "AssociationDescription")] @@ -7007,7 +7014,7 @@ pub struct UpdateAssociationStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAssociationStatusResult { ///

Information about the association.

#[serde(rename = "AssociationDescription")] @@ -7026,7 +7033,7 @@ pub struct UpdateDocumentDefaultVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDocumentDefaultVersionResult { ///

The description of a custom document that you want to set as the default version.

#[serde(rename = "Description")] @@ -7047,7 +7054,7 @@ pub struct UpdateDocumentRequest { #[serde(rename = "DocumentFormat")] #[serde(skip_serializing_if = "Option::is_none")] pub document_format: Option, - ///

The version of the document that you want to update.

+ ///

(Required) The version of the document that you want to update.

#[serde(rename = "DocumentVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub document_version: Option, @@ -7065,7 +7072,7 @@ pub struct UpdateDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDocumentResult { ///

A description of the document that was updated.

#[serde(rename = "DocumentDescription")] @@ -7125,7 +7132,7 @@ pub struct UpdateMaintenanceWindowRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateMaintenanceWindowResult { ///

Whether targets must be registered with the maintenance window before tasks can be defined for those targets.

#[serde(rename = "AllowUnassociatedTargets")] @@ -7204,7 +7211,7 @@ pub struct UpdateMaintenanceWindowTargetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateMaintenanceWindowTargetResult { ///

The updated description.

#[serde(rename = "Description")] @@ -7292,7 +7299,7 @@ pub struct UpdateMaintenanceWindowTaskRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateMaintenanceWindowTaskResult { ///

The updated task description.

#[serde(rename = "Description")] @@ -7360,7 +7367,7 @@ pub struct UpdateManagedInstanceRoleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateManagedInstanceRoleResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -7373,7 +7380,7 @@ pub struct UpdateOpsItemRequest { #[serde(rename = "Notifications")] #[serde(skip_serializing_if = "Option::is_none")] pub notifications: Option>, - ///

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

This custom data is searchable, but with restrictions. For the Searchable operational data feature, all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. For the Private operational data feature, the data is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

+ ///

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.

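A hedged sketch of the `/aws/resources` usage mentioned above; the `OpsItemDataValue` shape follows this crate's generated types, and the ARN payload format is illustrative only:

```rust
use std::collections::HashMap;

use rusoto_ssm::{OpsItemDataValue, UpdateOpsItemRequest};

fn attach_related_resource(ops_item_id: &str, resource_arn: &str) -> UpdateOpsItemRequest {
    let mut operational_data = HashMap::new();
    operational_data.insert(
        "/aws/resources".to_string(),
        OpsItemDataValue {
            type_: Some("SearchableString".to_string()),
            value: Some(format!(r#"[{{"arn": "{}"}}]"#, resource_arn)),
        },
    );
    UpdateOpsItemRequest {
        ops_item_id: ops_item_id.to_string(),
        operational_data: Some(operational_data),
        ..Default::default()
    }
}
```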
#[serde(rename = "OperationalData")] #[serde(skip_serializing_if = "Option::is_none")] pub operational_data: Option<::std::collections::HashMap>, @@ -7392,7 +7399,7 @@ pub struct UpdateOpsItemRequest { #[serde(rename = "RelatedOpsItems")] #[serde(skip_serializing_if = "Option::is_none")] pub related_ops_items: Option>, - ///

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

+ ///

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

#[serde(rename = "Status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, @@ -7403,7 +7410,7 @@ pub struct UpdateOpsItemRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateOpsItemResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -7458,7 +7465,7 @@ pub struct UpdatePatchBaselineRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePatchBaselineResult { ///

A set of rules used to include patches in the baseline.

#[serde(rename = "ApprovalRules")] @@ -7531,7 +7538,7 @@ pub struct UpdateServiceSettingRequest { ///

The result body of the UpdateServiceSetting API action.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateServiceSettingResult {} /// Errors returned by AddTagsToResource @@ -7543,7 +7550,7 @@ pub enum AddTagsToResourceError { InvalidResourceId(String), ///

The resource type is not valid. For example, if you are attempting to tag an instance, the instance must be a registered, managed instance.

InvalidResourceType(String), - ///

The Targets parameter includes too many tags. Remove one or more tags and try the command again.

+ ///

The Targets parameter includes too many tags. Remove one or more tags and try the command again.

TooManyTagsError(String), ///

There are concurrent updates for a resource that supports one update at a time.

TooManyUpdates(String), @@ -7604,7 +7611,7 @@ pub enum CancelCommandError { InternalServerError(String), InvalidCommandId(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), } @@ -7739,7 +7746,7 @@ pub enum CreateAssociationError { InvalidDocument(String), ///

The document version is not valid or does not exist.

InvalidDocumentVersion(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The output location is not valid or does not exist.

InvalidOutputLocation(String), @@ -7844,7 +7851,7 @@ pub enum CreateAssociationBatchError { InvalidDocument(String), ///

The document version is not valid or does not exist.

InvalidDocumentVersion(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The output location is not valid or does not exist.

InvalidOutputLocation(String), @@ -7951,7 +7958,7 @@ impl Error for CreateAssociationBatchError { pub enum CreateDocumentError { ///

The specified document already exists.

DocumentAlreadyExists(String), - ///

You can have at most 200 active Systems Manager documents.

+ ///

You can have at most 500 active Systems Manager documents.

DocumentLimitExceeded(String), ///

An error occurred on the server side.

InternalServerError(String), @@ -8079,7 +8086,7 @@ pub enum CreateOpsItemError { OpsItemAlreadyExists(String), ///

A specified parameter argument isn't valid. Verify the available arguments and try again.

OpsItemInvalidParameter(String), - ///

The request caused OpsItems to exceed one or more limits. For information about OpsItem limits, see What are the resource limits for OpsItems?.

+ ///

The request caused OpsItems to exceed one or more limits. For information about OpsItem limits, see What are the resource limits for OpsCenter?.

OpsItemLimitExceeded(String), } @@ -8297,7 +8304,7 @@ pub enum DeleteAssociationError { InternalServerError(String), ///

The specified document does not exist.

InvalidDocument(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

There are concurrent updates for a resource that supports one update at a time.

TooManyUpdates(String), @@ -8661,7 +8668,7 @@ impl Error for DeleteResourceDataSyncError { pub enum DeregisterManagedInstanceError { ///

An error occurred on the server side.

InternalServerError(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), } @@ -8902,7 +8909,7 @@ pub enum DescribeAssociationError { InvalidAssociationVersion(String), ///

The specified document does not exist.

InvalidDocument(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), } @@ -9346,7 +9353,7 @@ impl Error for DescribeDocumentPermissionError { pub enum DescribeEffectiveInstanceAssociationsError { ///

An error occurred on the server side.

InternalServerError(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -9464,7 +9471,7 @@ impl Error for DescribeEffectivePatchesForPatchBaselineError { pub enum DescribeInstanceAssociationsStatusError { ///

An error occurred on the server side.

InternalServerError(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -9519,7 +9526,7 @@ pub enum DescribeInstanceInformationError { InternalServerError(String), ///

The specified key is not valid.

InvalidFilterKey(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified filter value is not valid.

InvalidInstanceInformationFilterValue(String), @@ -9690,7 +9697,7 @@ pub enum DescribeInstancePatchesError { InternalServerError(String), ///

The filter name is not valid. Verify that you entered the correct name and try again.

InvalidFilter(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -10482,7 +10489,7 @@ pub enum GetCommandInvocationError { InternalServerError(String), InvalidCommandId(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The plugin name is not valid.

InvalidPluginName(String), @@ -11670,7 +11677,7 @@ pub enum ListCommandInvocationsError { InvalidCommandId(String), ///

The specified key is not valid.

InvalidFilterKey(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -11737,7 +11744,7 @@ pub enum ListCommandsError { InvalidCommandId(String), ///

The specified key is not valid.

InvalidFilterKey(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -12004,7 +12011,7 @@ pub enum ListInventoryEntriesError { InternalServerError(String), ///

The filter name is not valid. Verify that you entered the correct name and try again.

InvalidFilter(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The specified token is not valid.

InvalidNextToken(String), @@ -12212,7 +12219,7 @@ impl Error for ListTagsForResourceError { /// Errors returned by ModifyDocumentPermission #[derive(Debug, PartialEq)] pub enum ModifyDocumentPermissionError { - ///

You can have at most 200 active Systems Manager documents.

+ ///

You can have at most 500 active Systems Manager documents.

DocumentLimitExceeded(String), ///

The document cannot be shared with more AWS user accounts. You can share a document with a maximum of 20 accounts. You can publicly share up to five documents. If you need to increase this limit, contact AWS Support.

DocumentPermissionLimit(String), @@ -12366,7 +12373,7 @@ pub enum PutInventoryError { CustomSchemaCountLimitExceeded(String), ///

An error occurred on the server side.

InternalServerError(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

You specified invalid keys or values in the Context attribute for InventoryItem. Verify the keys and values, and try again.

InvalidInventoryItemContext(String), @@ -12785,7 +12792,7 @@ impl Error for RegisterTargetWithMaintenanceWindowError { pub enum RegisterTaskWithMaintenanceWindowError { ///

Error returned when the ID specified for a resource, such as a maintenance window or patch baseline, doesn't exist.

For information about resource limits in Systems Manager, see AWS Systems Manager Limits.

DoesNotExist(String), - ///

You attempted to register a LAMBDA or STEP_FUNCTION task in a region where the corresponding service is not available.

+ ///

You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where the corresponding service is not available.

FeatureNotAvailable(String), ///

Error returned when an idempotent operation is retried and the parameters don't match the original call to the API with the same idempotency token.

IdempotentParameterMismatch(String), @@ -13068,7 +13075,7 @@ pub enum SendCommandError { InvalidDocument(String), ///

The document version is not valid or does not exist.

InvalidDocumentVersion(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

One or more configuration items is not valid. Verify that a valid Amazon Resource Name (ARN) was provided for an Amazon SNS topic.

InvalidNotificationConfig(String), @@ -13537,7 +13544,7 @@ pub enum UpdateAssociationStatusError { InternalServerError(String), ///

The specified document does not exist.

InvalidDocument(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), ///

The updated status is the same as the current status.

StatusUnchanged(String), @@ -13905,7 +13912,7 @@ impl Error for UpdateMaintenanceWindowTaskError { pub enum UpdateManagedInstanceRoleError { ///

An error occurred on the server side.

InternalServerError(String), - ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

+ ///

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

InvalidInstanceId(String), } @@ -13952,7 +13959,7 @@ pub enum UpdateOpsItemError { OpsItemAlreadyExists(String), ///

A specified parameter argument isn't valid. Verify the available arguments and try again.

OpsItemInvalidParameter(String), - ///

The request caused OpsItems to exceed one or more limits. For information about OpsItem limits, see What are the resource limits for OpsItems?.

+ ///

The request caused OpsItems to exceed one or more limits. For information about OpsItem limits, see What are the resource limits for OpsCenter?

OpsItemLimitExceeded(String), ///

The specified OpsItem ID doesn't exist. Verify the ID and try again.

OpsItemNotFound(String), @@ -14142,7 +14149,7 @@ pub trait Ssm { input: CreateMaintenanceWindowRequest, ) -> RusotoFuture; - ///

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.
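As a usage sketch (not part of this diff), calling the new OpsCenter operation through the client might look like the following; it assumes `rusoto_core` and `rusoto_ssm` 0.41 as dependencies, and that `title`, `source`, and `description` are the required fields of `CreateOpsItemRequest`:

```rust
use rusoto_core::Region;
use rusoto_ssm::{CreateOpsItemRequest, Ssm, SsmClient};

fn main() {
    let client = SsmClient::new(Region::UsEast1);
    // Hypothetical OpsItem describing an operational issue.
    let request = CreateOpsItemRequest {
        title: "Degraded instance".to_string(),
        source: "EC2".to_string(),
        description: "Investigate elevated CPU on i-0123456789abcdef0".to_string(),
        ..Default::default()
    };
    // `.sync()` blocks on the returned RusotoFuture.
    match client.create_ops_item(request).sync() {
        Ok(output) => println!("created OpsItem: {:?}", output.ops_item_id),
        Err(e) => eprintln!("create_ops_item failed: {}", e),
    }
}
```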

fn create_ops_item( &self, input: CreateOpsItemRequest, @@ -14196,7 +14203,7 @@ pub trait Ssm { input: DeleteParameterRequest, ) -> RusotoFuture; - ///

Delete a list of parameters. This API is used to delete parameters by using the Amazon EC2 console.

+ ///

Delete a list of parameters.

fn delete_parameters( &self, input: DeleteParametersRequest, @@ -14247,7 +14254,7 @@ pub trait Ssm { DeregisterTaskFromMaintenanceWindowError, >; - ///

Details about the activation, including: the date and time the activation was created, the expiration date, the IAM role assigned to the instances in the activation, and the number of instances activated by this registration.

+ ///

Describes details about the activation, such as the date and time the activation was created, its expiration date, the IAM role assigned to the instances in the activation, and the number of instances registered by using this activation.
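A sketch of paging through activations with this operation; the `activation_list` and `next_token` field names are assumptions read off the generated response type, and the loop follows the usual rusoto pagination pattern:

```rust
use rusoto_core::Region;
use rusoto_ssm::{DescribeActivationsRequest, Ssm, SsmClient};

fn main() {
    let client = SsmClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let request = DescribeActivationsRequest {
            next_token: next_token.clone(),
            ..Default::default()
        };
        let output = client
            .describe_activations(request)
            .sync()
            .expect("describe_activations failed");
        for activation in output.activation_list.unwrap_or_default() {
            println!("activation: {:?}", activation.activation_id);
        }
        next_token = output.next_token;
        if next_token.is_none() {
            break;
        }
    }
}
```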

fn describe_activations( &self, input: DescribeActivationsRequest, @@ -14286,7 +14293,7 @@ pub trait Ssm { input: DescribeAutomationStepExecutionsRequest, ) -> RusotoFuture; - ///

Lists all patches that could possibly be included in a patch baseline.

+ ///

Lists all patches eligible to be included in a patch baseline.

fn describe_available_patches( &self, input: DescribeAvailablePatchesRequest, @@ -14424,7 +14431,7 @@ pub trait Ssm { DescribeMaintenanceWindowsForTargetError, >; - ///

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

fn describe_ops_items( &self, input: DescribeOpsItemsRequest, @@ -14523,7 +14530,7 @@ pub trait Ssm { input: GetMaintenanceWindowRequest, ) -> RusotoFuture; - ///

Retrieves details about a specific task run as part of a maintenance window execution.

+ ///

Retrieves details about a specific maintenance window execution.

fn get_maintenance_window_execution( &self, input: GetMaintenanceWindowExecutionRequest, @@ -14535,7 +14542,7 @@ pub trait Ssm { input: GetMaintenanceWindowExecutionTaskRequest, ) -> RusotoFuture; - ///

Retrieves a task invocation. A task invocation is a specific task running on a specific target. maintenance windows report status for all invocations.

+ ///

Retrieves information about a specific task running on a specific target.

fn get_maintenance_window_execution_task_invocation( &self, input: GetMaintenanceWindowExecutionTaskInvocationRequest, @@ -14550,7 +14557,7 @@ pub trait Ssm { input: GetMaintenanceWindowTaskRequest, ) -> RusotoFuture; - ///

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

fn get_ops_item( &self, input: GetOpsItemRequest, @@ -14706,7 +14713,7 @@ pub trait Ssm { input: PutParameterRequest, ) -> RusotoFuture; - ///

Defines the default patch baseline.

+ ///

Defines the default patch baseline for the relevant operating system.

To reset the AWS predefined patch baseline as the default, specify the full patch baseline ARN as the baseline ID value. For example, for CentOS, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead of pb-0574b43a65ea646ed.
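A minimal sketch of the ARN-based reset described above, using the CentOS baseline from the example:

```rust
use rusoto_core::Region;
use rusoto_ssm::{RegisterDefaultPatchBaselineRequest, Ssm, SsmClient};

fn main() {
    let client = SsmClient::new(Region::UsEast2);
    // Resetting to the AWS-predefined baseline requires the full ARN,
    // not the bare pb-... identifier.
    let request = RegisterDefaultPatchBaselineRequest {
        baseline_id: "arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed"
            .to_string(),
    };
    if let Err(e) = client.register_default_patch_baseline(request).sync() {
        eprintln!("register_default_patch_baseline failed: {}", e);
    }
}
```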

fn register_default_patch_baseline( &self, input: RegisterDefaultPatchBaselineRequest, @@ -14736,7 +14743,7 @@ pub trait Ssm { input: RegisterTaskWithMaintenanceWindowRequest, ) -> RusotoFuture; - ///

Removes all tags from the specified resource.

+ ///

Removes tag keys from the specified resource.
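A sketch of removing a single tag key; the `ManagedInstance` resource-type string and the instance ID are placeholder assumptions:

```rust
use rusoto_core::Region;
use rusoto_ssm::{RemoveTagsFromResourceRequest, Ssm, SsmClient};

fn main() {
    let client = SsmClient::new(Region::UsEast1);
    // Only the listed keys are removed; other tags on the resource remain.
    let request = RemoveTagsFromResourceRequest {
        resource_id: "mi-0123456789abcdef0".to_string(),
        resource_type: "ManagedInstance".to_string(),
        tag_keys: vec!["Environment".to_string()],
    };
    if let Err(e) = client.remove_tags_from_resource(request).sync() {
        eprintln!("remove_tags_from_resource failed: {}", e);
    }
}
```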

fn remove_tags_from_resource( &self, input: RemoveTagsFromResourceRequest, @@ -14808,7 +14815,7 @@ pub trait Ssm { input: UpdateAssociationStatusRequest, ) -> RusotoFuture; - ///

The document you want to update.

+ ///

Updates one or more values for an SSM document.

fn update_document( &self, input: UpdateDocumentRequest, @@ -14826,7 +14833,7 @@ pub trait Ssm { input: UpdateMaintenanceWindowRequest, ) -> RusotoFuture; - ///

Modifies the target of an existing maintenance window. You can't change the target type, but you can change the following:

The target from being an ID target to a Tag target, or a Tag target to an ID target.

IDs for an ID target.

Tags for a Tag target.

Owner.

Name.

Description.

If a parameter is null, then the corresponding field is not modified.

+ ///

Modifies the target of an existing maintenance window. You can change the following:

  • Name

  • Description

  • Owner

  • IDs for an ID target

  • Tags for a Tag target

  • From any supported tag type to another. The three supported tag types are ID target, Tag target, and resource group. For more information, see Target.

If a parameter is null, then the corresponding field is not modified.
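A sketch of that null-means-unchanged behavior: only `name` is set, so every other attribute of the target is left as it was (window and target IDs are placeholders):

```rust
use rusoto_core::Region;
use rusoto_ssm::{Ssm, SsmClient, UpdateMaintenanceWindowTargetRequest};

fn main() {
    let client = SsmClient::new(Region::UsEast1);
    let request = UpdateMaintenanceWindowTargetRequest {
        window_id: "mw-0123456789abcdef0".to_string(),
        window_target_id: "e32eecb2-646c-4f4b-8ed1-205fbEXAMPLE".to_string(),
        name: Some("prod-web-fleet".to_string()),
        // Fields left as None are not modified.
        ..Default::default()
    };
    if let Err(e) = client.update_maintenance_window_target(request).sync() {
        eprintln!("update_maintenance_window_target failed: {}", e);
    }
}
```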

fn update_maintenance_window_target( &self, input: UpdateMaintenanceWindowTargetRequest, @@ -14838,13 +14845,13 @@ pub trait Ssm { input: UpdateMaintenanceWindowTaskRequest, ) -> RusotoFuture; - ///

Assigns or changes an Amazon Identity and Access Management (IAM) role to the managed instance.

+ ///

Assigns or changes an AWS Identity and Access Management (IAM) role for the managed instance.

fn update_managed_instance_role( &self, input: UpdateManagedInstanceRoleRequest, ) -> RusotoFuture; - ///

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

    fn update_ops_item(
        &self,
        input: UpdateOpsItemRequest,
@@ -14874,10 +14881,7 @@ impl SsmClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> SsmClient {
-        SsmClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }
 
     pub fn new_with<P, D>(
@@ -14891,10 +14895,14 @@ impl SsmClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        SsmClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> SsmClient {
+        SsmClient { client, region }
     }
 }
 
@@ -15127,7 +15135,7 @@ impl Ssm for SsmClient {
        })
    }

-    ///

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.
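The `new_with_client` constructor added in the hunk above lets callers share one `rusoto_core::Client` (dispatcher plus credentials provider) across service clients; a minimal sketch:

```rust
use rusoto_core::{Client, Region};
use rusoto_ssm::SsmClient;

fn main() {
    // One shared HTTP dispatcher and credentials provider, reused by
    // every service client constructed from it.
    let shared = Client::shared();
    let ssm = SsmClient::new_with_client(shared, Region::UsEast1);
    let _ = ssm;
}
```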

fn create_ops_item( &self, input: CreateOpsItemRequest, @@ -15383,7 +15391,7 @@ impl Ssm for SsmClient { }) } - ///

Delete a list of parameters. This API is used to delete parameters by using the Amazon EC2 console.

+ ///

Delete a list of parameters.

fn delete_parameters( &self, input: DeleteParametersRequest, @@ -15596,7 +15604,7 @@ impl Ssm for SsmClient { }) } - ///

Details about the activation, including: the date and time the activation was created, the expiration date, the IAM role assigned to the instances in the activation, and the number of instances activated by this registration.

+ ///

Describes details about the activation, such as the date and time the activation was created, its expiration date, the IAM role assigned to the instances in the activation, and the number of instances registered by using this activation.

fn describe_activations( &self, input: DescribeActivationsRequest, @@ -15767,7 +15775,7 @@ impl Ssm for SsmClient { }) } - ///

Lists all patches that could possibly be included in a patch baseline.

+ ///

Lists all patches eligible to be included in a patch baseline.

fn describe_available_patches( &self, input: DescribeAvailablePatchesRequest, @@ -16331,7 +16339,7 @@ impl Ssm for SsmClient { }) } - ///

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

fn describe_ops_items( &self, input: DescribeOpsItemsRequest, @@ -16785,7 +16793,7 @@ impl Ssm for SsmClient { }) } - ///

Retrieves details about a specific task run as part of a maintenance window execution.

+ ///

Retrieves details about a specific maintenance window execution.

fn get_maintenance_window_execution( &self, input: GetMaintenanceWindowExecutionRequest, @@ -16843,7 +16851,7 @@ impl Ssm for SsmClient { }) } - ///

Retrieves a task invocation. A task invocation is a specific task running on a specific target. maintenance windows report status for all invocations.

+ ///

Retrieves information about a specific task running on a specific target.

fn get_maintenance_window_execution_task_invocation( &self, input: GetMaintenanceWindowExecutionTaskInvocationRequest, @@ -16901,7 +16909,7 @@ impl Ssm for SsmClient { }) } - ///

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

fn get_ops_item( &self, input: GetOpsItemRequest, @@ -17634,7 +17642,7 @@ impl Ssm for SsmClient { }) } - ///

Defines the default patch baseline.

+ ///

Defines the default patch baseline for the relevant operating system.

To reset the AWS predefined patch baseline as the default, specify the full patch baseline ARN as the baseline ID value. For example, for CentOS, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead of pb-0574b43a65ea646ed.

fn register_default_patch_baseline( &self, input: RegisterDefaultPatchBaselineRequest, @@ -17760,7 +17768,7 @@ impl Ssm for SsmClient { }) } - ///

Removes all tags from the specified resource.

+ ///

Removes tag keys from the specified resource.

fn remove_tags_from_resource( &self, input: RemoveTagsFromResourceRequest, @@ -18095,7 +18103,7 @@ impl Ssm for SsmClient { }) } - ///

The document you want to update.

+ ///

Updates one or more values for an SSM document.

fn update_document( &self, input: UpdateDocumentRequest, @@ -18176,7 +18184,7 @@ impl Ssm for SsmClient { }) } - ///

Modifies the target of an existing maintenance window. You can't change the target type, but you can change the following:

The target from being an ID target to a Tag target, or a Tag target to an ID target.

IDs for an ID target.

Tags for a Tag target.

Owner.

Name.

Description.

If a parameter is null, then the corresponding field is not modified.

+ ///

Modifies the target of an existing maintenance window. You can change the following:

  • Name

  • Description

  • Owner

  • IDs for an ID target

  • Tags for a Tag target

  • From any supported tag type to another. The three supported tag types are ID target, Tag target, and resource group. For more information, see Target.

If a parameter is null, then the corresponding field is not modified.

fn update_maintenance_window_target( &self, input: UpdateMaintenanceWindowTargetRequest, @@ -18228,7 +18236,7 @@ impl Ssm for SsmClient { }) } - ///

Assigns or changes an Amazon Identity and Access Management (IAM) role to the managed instance.

+ ///

Assigns or changes an AWS Identity and Access Management (IAM) role for the managed instance.

fn update_managed_instance_role( &self, input: UpdateManagedInstanceRoleRequest, @@ -18254,7 +18262,7 @@ impl Ssm for SsmClient { }) } - ///

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting Started with OpsItems in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use the Systems Manager OpsItems capability to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsItems in the AWS Systems Manager User Guide.

+ ///

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

    fn update_ops_item(
        &self,
        input: UpdateOpsItemRequest,
diff --git a/rusoto/services/stepfunctions/Cargo.toml b/rusoto/services/stepfunctions/Cargo.toml
index 40680a69608..016c969bae5 100644
--- a/rusoto/services/stepfunctions/Cargo.toml
+++ b/rusoto/services/stepfunctions/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_stepfunctions"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/stepfunctions/README.md b/rusoto/services/stepfunctions/README.md
index 0dc2ca9867d..bffccc514e7 100644
--- a/rusoto/services/stepfunctions/README.md
+++ b/rusoto/services/stepfunctions/README.md
@@ -23,9 +23,16 @@ To use `rusoto_stepfunctions` in your application, add it as a dependency in you
 ```toml
 [dependencies]
-rusoto_stepfunctions = "0.40.0"
+rusoto_stepfunctions = "0.41.0"
 ```
+
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/stepfunctions/src/custom/mod.rs b/rusoto/services/stepfunctions/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/stepfunctions/src/custom/mod.rs
+++ b/rusoto/services/stepfunctions/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/stepfunctions/src/generated.rs b/rusoto/services/stepfunctions/src/generated.rs
index 7aef121882f..f6716370c38 100644
--- a/rusoto/services/stepfunctions/src/generated.rs
+++ b/rusoto/services/stepfunctions/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
 
 ///

Contains details about an activity that failed during an execution.
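With the `serialize_structs` feature added in this diff (for example `rusoto_stepfunctions = { version = "0.41.0", features = ["serialize_structs"] }`), structs like this one derive `Serialize` outside of tests; a sketch, also assuming a `serde_json` dependency:

```rust
use rusoto_stepfunctions::ActivityFailedEventDetails;

fn main() -> Result<(), serde_json::Error> {
    let details = ActivityFailedEventDetails {
        cause: Some("worker crashed".to_string()),
        error: Some("States.TaskFailed".to_string()),
    };
    // Compiles only with `serialize_structs` enabled (or under `cfg(test)`).
    println!("{}", serde_json::to_string(&details)?);
    Ok(())
}
```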

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -40,7 +39,7 @@ pub struct ActivityFailedEventDetails { ///

Contains details about an activity.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityListItem { ///

The Amazon Resource Name (ARN) that identifies the activity.

#[serde(rename = "activityArn")] @@ -48,14 +47,14 @@ pub struct ActivityListItem { ///

The date the activity is created.

#[serde(rename = "creationDate")] pub creation_date: f64, - ///

The name of the activity.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the activity.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, } ///

Contains details about an activity schedule failure that occurred during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityScheduleFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -69,7 +68,7 @@ pub struct ActivityScheduleFailedEventDetails { ///

Contains details about an activity scheduled during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityScheduledEventDetails { ///

The maximum allowed duration between two heartbeats for the activity task.

#[serde(rename = "heartbeatInSeconds")] @@ -90,7 +89,7 @@ pub struct ActivityScheduledEventDetails { ///

Contains details about the start of an activity during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityStartedEventDetails { ///

The name of the worker that the task is assigned to. These names are provided by the workers when calling GetActivityTask.

#[serde(rename = "workerName")] @@ -100,7 +99,7 @@ pub struct ActivityStartedEventDetails { ///

Contains details about an activity that successfully terminated during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivitySucceededEventDetails { ///

The JSON data output by the activity task.

#[serde(rename = "output")] @@ -110,7 +109,7 @@ pub struct ActivitySucceededEventDetails { ///

Contains details about an activity timeout that occurred during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivityTimedOutEventDetails { ///

A more detailed explanation of the cause of the timeout.

#[serde(rename = "cause")] @@ -124,17 +123,17 @@ pub struct ActivityTimedOutEventDetails { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateActivityInput { - ///

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, - ///

The list of tags to add to a resource.

+ ///

The list of tags to add to a resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateActivityOutput { ///

The Amazon Resource Name (ARN) that identifies the created activity.

#[serde(rename = "activityArn")] @@ -149,20 +148,20 @@ pub struct CreateStateMachineInput { ///

The Amazon States Language definition of the state machine. See Amazon States Language.

#[serde(rename = "definition")] pub definition: String, - ///

The name of the state machine.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the state machine.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, ///

The Amazon Resource Name (ARN) of the IAM role to use for this state machine.

#[serde(rename = "roleArn")] pub role_arn: String, - ///

Tags to be added when creating a state machine.

+ ///

Tags to be added when creating a state machine.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

#[serde(rename = "tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStateMachineOutput { ///

The date the state machine is created.

#[serde(rename = "creationDate")] @@ -180,7 +179,7 @@ pub struct DeleteActivityInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteActivityOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -191,7 +190,7 @@ pub struct DeleteStateMachineInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteStateMachineOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -202,7 +201,7 @@ pub struct DescribeActivityInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeActivityOutput { ///

The Amazon Resource Name (ARN) that identifies the activity.

#[serde(rename = "activityArn")] @@ -210,7 +209,7 @@ pub struct DescribeActivityOutput { ///

The date the activity is created.

#[serde(rename = "creationDate")] pub creation_date: f64, - ///

The name of the activity.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the activity.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, } @@ -223,7 +222,7 @@ pub struct DescribeExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeExecutionOutput { ///

The Amazon Resource Name (ARN) that identifies the execution.

#[serde(rename = "executionArn")] @@ -231,7 +230,7 @@ pub struct DescribeExecutionOutput { ///

The string that contains the JSON input data of the execution.

#[serde(rename = "input")] pub input: String, - ///

The name of the execution.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the execution.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, @@ -262,7 +261,7 @@ pub struct DescribeStateMachineForExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStateMachineForExecutionOutput { ///

The Amazon States Language definition of the state machine. See Amazon States Language.

#[serde(rename = "definition")] @@ -289,7 +288,7 @@ pub struct DescribeStateMachineInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStateMachineOutput { ///

The date the state machine is created.

#[serde(rename = "creationDate")] @@ -297,7 +296,7 @@ pub struct DescribeStateMachineOutput { ///

The Amazon States Language definition of the state machine. See Amazon States Language.

#[serde(rename = "definition")] pub definition: String, - ///

The name of the state machine.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the state machine.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, ///

The Amazon Resource Name (ARN) of the IAM role used when creating this state machine. (The IAM role maintains security by granting Step Functions access to AWS resources.)

@@ -314,7 +313,7 @@ pub struct DescribeStateMachineOutput { ///

Contains details about an abort of an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionAbortedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -328,7 +327,7 @@ pub struct ExecutionAbortedEventDetails { ///

Contains details about an execution failure event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -342,12 +341,12 @@ pub struct ExecutionFailedEventDetails { ///

Contains details about an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionListItem { ///

The Amazon Resource Name (ARN) that identifies the execution.

#[serde(rename = "executionArn")] pub execution_arn: String, - ///

The name of the execution.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the execution.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, ///

The date the execution started.

@@ -367,7 +366,7 @@ pub struct ExecutionListItem { ///

Contains details about the start of the execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionStartedEventDetails { ///

The JSON data input to the execution.

#[serde(rename = "input")] @@ -381,7 +380,7 @@ pub struct ExecutionStartedEventDetails { ///

Contains details about the successful termination of the execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionSucceededEventDetails { ///

The JSON data output by the execution.

#[serde(rename = "output")] @@ -391,7 +390,7 @@ pub struct ExecutionSucceededEventDetails { ///

Contains details about the execution timeout that occurred during the execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExecutionTimedOutEventDetails { ///

A more detailed explanation of the cause of the timeout.

#[serde(rename = "cause")] @@ -415,7 +414,7 @@ pub struct GetActivityTaskInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetActivityTaskOutput { ///

The string that contains the JSON input data for the task.

#[serde(rename = "input")] @@ -447,7 +446,7 @@ pub struct GetExecutionHistoryInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetExecutionHistoryOutput { ///

The list of events that occurred in the execution.

#[serde(rename = "events")] @@ -460,7 +459,7 @@ pub struct GetExecutionHistoryOutput { ///

Contains details about the events of an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HistoryEvent { #[serde(rename = "activityFailedEventDetails")] #[serde(skip_serializing_if = "Option::is_none")] @@ -520,6 +519,26 @@ pub struct HistoryEvent { #[serde(rename = "lambdaFunctionTimedOutEventDetails")] #[serde(skip_serializing_if = "Option::is_none")] pub lambda_function_timed_out_event_details: Option, + ///

Contains details about an iteration of a Map state that was aborted.

+ #[serde(rename = "mapIterationAbortedEventDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub map_iteration_aborted_event_details: Option, + ///

Contains details about an iteration of a Map state that failed.

+ #[serde(rename = "mapIterationFailedEventDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub map_iteration_failed_event_details: Option, + ///

Contains details about an iteration of a Map state that was started.

+ #[serde(rename = "mapIterationStartedEventDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub map_iteration_started_event_details: Option, + ///

Contains details about an iteration of a Map state that succeeded.

+ #[serde(rename = "mapIterationSucceededEventDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub map_iteration_succeeded_event_details: Option, + ///

Contains details about a Map state that was started.

+ #[serde(rename = "mapStateStartedEventDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub map_state_started_event_details: Option, ///

The id of the previous event.

#[serde(rename = "previousEventId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -572,7 +591,7 @@ pub struct HistoryEvent { ///

Contains details about a lambda function that failed during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -586,7 +605,7 @@ pub struct LambdaFunctionFailedEventDetails { ///

Contains details about a failed lambda function schedule event that occurred during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionScheduleFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -600,7 +619,7 @@ pub struct LambdaFunctionScheduleFailedEventDetails { ///

Contains details about a lambda function scheduled during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionScheduledEventDetails { ///

The JSON data input to the lambda function.

#[serde(rename = "input")] @@ -617,7 +636,7 @@ pub struct LambdaFunctionScheduledEventDetails { ///

Contains details about a lambda function that failed to start during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionStartFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -631,7 +650,7 @@ pub struct LambdaFunctionStartFailedEventDetails { ///

Contains details about a lambda function that successfully terminated during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionSucceededEventDetails { ///

The JSON data output by the lambda function.

#[serde(rename = "output")] @@ -641,7 +660,7 @@ pub struct LambdaFunctionSucceededEventDetails { ///

Contains details about a lambda function timeout that occurred during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionTimedOutEventDetails { ///

A more detailed explanation of the cause of the timeout.

#[serde(rename = "cause")] @@ -666,7 +685,7 @@ pub struct ListActivitiesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActivitiesOutput { ///

The list of activities.

#[serde(rename = "activities")] @@ -697,7 +716,7 @@ pub struct ListExecutionsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListExecutionsOutput { ///

The list of matching executions.

#[serde(rename = "executions")] @@ -721,7 +740,7 @@ pub struct ListStateMachinesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListStateMachinesOutput { ///

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

#[serde(rename = "nextToken")] @@ -739,7 +758,7 @@ pub struct ListTagsForResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTagsForResourceOutput { ///

An array of tags associated with the resource.

#[serde(rename = "tags")] @@ -747,6 +766,30 @@ pub struct ListTagsForResourceOutput { pub tags: Option>, } +///

Contains details about an iteration of a Map state.
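Once these events show up in execution history, reading the new Map-state details might look like this sketch (built on the structs defined just below and the existing `get_execution_history` operation; the execution ARN is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{GetExecutionHistoryInput, StepFunctions, StepFunctionsClient};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    let input = GetExecutionHistoryInput {
        execution_arn: "arn:aws:states:us-east-1:123456789012:execution:demo:run-1".to_string(),
        ..Default::default()
    };
    let history = client
        .get_execution_history(input)
        .sync()
        .expect("get_execution_history failed");
    for event in history.events {
        if let Some(d) = event.map_state_started_event_details {
            println!("Map state started, {:?} iterations", d.length);
        }
        if let Some(d) = event.map_iteration_started_event_details {
            println!("iteration {:?} of {:?} started", d.index, d.name);
        }
    }
}
```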

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct MapIterationEventDetails {
+    ///

The index of the array belonging to the Map state iteration.

+ #[serde(rename = "index")] + #[serde(skip_serializing_if = "Option::is_none")] + pub index: Option, + ///

The name of the iteration’s parent Map state.

+ #[serde(rename = "name")] + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +///

Details about a Map state that was started.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct MapStateStartedEventDetails {
+    ///

The size of the array for Map state iterations.

+ #[serde(rename = "length")] + #[serde(skip_serializing_if = "Option::is_none")] + pub length: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SendTaskFailureInput { ///

A more detailed explanation of the cause of the failure.

@@ -757,24 +800,24 @@ pub struct SendTaskFailureInput { #[serde(rename = "error")] #[serde(skip_serializing_if = "Option::is_none")] pub error: Option, - ///

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTask::taskToken).

+ ///

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.

#[serde(rename = "taskToken")] pub task_token: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendTaskFailureOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SendTaskHeartbeatInput { - ///

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken).

+ ///

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.

#[serde(rename = "taskToken")] pub task_token: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendTaskHeartbeatOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -782,13 +825,13 @@ pub struct SendTaskSuccessInput { ///

The JSON output of the task.

#[serde(rename = "output")] pub output: String, - ///

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken).

+ ///

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.
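A sketch of the activity-worker flow these tokens support: poll for a task, do the work, then report success with the same token (the ARN and JSON payload are placeholders):

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{
    GetActivityTaskInput, SendTaskSuccessInput, StepFunctions, StepFunctionsClient,
};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    let poll = GetActivityTaskInput {
        activity_arn: "arn:aws:states:us-east-1:123456789012:activity:demo".to_string(),
        worker_name: Some("worker-1".to_string()),
    };
    let task = client.get_activity_task(poll).sync().expect("poll failed");
    if let Some(token) = task.task_token {
        // ...perform the task using task.input, then report back with the token.
        let done = SendTaskSuccessInput {
            output: "{\"ok\":true}".to_string(),
            task_token: token,
        };
        client
            .send_task_success(done)
            .sync()
            .expect("send_task_success failed");
    }
}
```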

#[serde(rename = "taskToken")] pub task_token: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SendTaskSuccessOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -797,7 +840,7 @@ pub struct StartExecutionInput { #[serde(rename = "input")] #[serde(skip_serializing_if = "Option::is_none")] pub input: Option, - ///

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)
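A sketch of starting an execution with an explicit, unique name that satisfies the constraints above (ARN and payload are placeholders):

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{StartExecutionInput, StepFunctions, StepFunctionsClient};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    let input = StartExecutionInput {
        state_machine_arn: "arn:aws:states:us-east-1:123456789012:stateMachine:demo".to_string(),
        // Must be unique per account, region, and state machine for 90 days.
        name: Some("order-12345".to_string()),
        input: Some("{\"orderId\":12345}".to_string()),
    };
    match client.start_execution(input).sync() {
        Ok(out) => println!("started: {}", out.execution_arn),
        Err(e) => eprintln!("start_execution failed: {}", e),
    }
}
```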

#[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, @@ -807,7 +850,7 @@ pub struct StartExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartExecutionOutput { ///

The Amazon Resource Name (ARN) that identifies the execution.

#[serde(rename = "executionArn")] @@ -819,7 +862,7 @@ pub struct StartExecutionOutput { ///

Contains details about a state entered during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StateEnteredEventDetails { ///

The string that contains the JSON input data for the state.

#[serde(rename = "input")] @@ -832,9 +875,9 @@ pub struct StateEnteredEventDetails { ///

Contains details about an exit from a state during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StateExitedEventDetails { - ///

The name of the state.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the state.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, ///

The JSON output data of the state.

@@ -845,12 +888,12 @@ pub struct StateExitedEventDetails { ///

Contains details about the state machine.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StateMachineListItem { ///

The date the state machine is created.

#[serde(rename = "creationDate")] pub creation_date: f64, - ///

The name of the state machine.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

+ ///

The name of the state machine.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters " # % \ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

#[serde(rename = "name")] pub name: String, ///

The Amazon Resource Name (ARN) that identifies the state machine.

@@ -874,14 +917,14 @@ pub struct StopExecutionInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopExecutionOutput { ///

The date the execution is stopped.

#[serde(rename = "stopDate")] pub stop_date: f64, } -///

Tags are key-value pairs that can be associated with Step Functions state machines and activities.

+///

Tags are key-value pairs that can be associated with Step Functions state machines and activities.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Tag { ///

The key of a tag.

@@ -899,18 +942,18 @@ pub struct TagResourceInput { ///

The Amazon Resource Name (ARN) for the Step Functions state machine or activity.

#[serde(rename = "resourceArn")] pub resource_arn: String, - ///

The list of tags to add to a resource.

Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.

+ ///

The list of tags to add to a resource.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.
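A sketch of tagging a state machine within those constraints (the ARN is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{StepFunctions, StepFunctionsClient, Tag, TagResourceInput};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    let input = TagResourceInput {
        resource_arn: "arn:aws:states:us-east-1:123456789012:stateMachine:demo".to_string(),
        tags: vec![Tag {
            key: Some("cost-center".to_string()),
            value: Some("12345".to_string()),
        }],
    };
    if let Err(e) = client.tag_resource(input).sync() {
        eprintln!("tag_resource failed: {}", e);
    }
}
```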

#[serde(rename = "tags")] pub tags: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TagResourceOutput {} ///

Contains details about a task failure event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -930,7 +973,7 @@ pub struct TaskFailedEventDetails { ///

Contains details about a task scheduled during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskScheduledEventDetails { ///

The JSON data passed to the resource referenced in a task state.

#[serde(rename = "parameters")] @@ -952,7 +995,7 @@ pub struct TaskScheduledEventDetails { ///

Contains details about a task that failed to start during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskStartFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -972,7 +1015,7 @@ pub struct TaskStartFailedEventDetails { ///

Contains details about the start of a task during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskStartedEventDetails { ///

The service name of the resource in a task state.

#[serde(rename = "resource")] @@ -984,7 +1027,7 @@ pub struct TaskStartedEventDetails { ///

Contains details about a task that failed to submit during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskSubmitFailedEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -1004,7 +1047,7 @@ pub struct TaskSubmitFailedEventDetails { ///

Contains details about a task submitted to a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskSubmittedEventDetails { ///

The response from a resource when a task has started.

#[serde(rename = "output")] @@ -1020,7 +1063,7 @@ pub struct TaskSubmittedEventDetails { ///

Contains details about the successful completion of a task state.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskSucceededEventDetails { ///

The full JSON response from a resource when a task has succeeded. This response becomes the output of the related task.

#[serde(rename = "output")] @@ -1036,7 +1079,7 @@ pub struct TaskSucceededEventDetails { ///

Contains details about a resource timeout that occurred during an execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TaskTimedOutEventDetails { ///

A more detailed explanation of the cause of the failure.

#[serde(rename = "cause")] @@ -1065,7 +1108,7 @@ pub struct UntagResourceInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UntagResourceOutput {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -1084,7 +1127,7 @@ pub struct UpdateStateMachineInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateStateMachineOutput { ///

The date and time the state machine was updated.

#[serde(rename = "updateDate")] @@ -1663,7 +1706,7 @@ impl Error for ListStateMachinesError { pub enum ListTagsForResourceError { ///

The provided Amazon Resource Name (ARN) is invalid.

InvalidArn(String), - ///

Could not fine the referenced resource. Only state machine and activity ARNs are supported.

+ ///

Could not find the referenced resource. Only state machine and activity ARNs are supported.

ResourceNotFound(String), } @@ -1961,7 +2004,7 @@ impl Error for StopExecutionError { pub enum TagResourceError { ///

The provided Amazon Resource Name (ARN) is invalid.

InvalidArn(String), - ///

Could not fine the referenced resource. Only state machine and activity ARNs are supported.

+ ///

Could not find the referenced resource. Only state machine and activity ARNs are supported.

ResourceNotFound(String), ///

You've exceeded the number of tags allowed for a resource. See the Limits Topic in the AWS Step Functions Developer Guide.

TooManyTags(String), @@ -2004,7 +2047,7 @@ impl Error for TagResourceError { pub enum UntagResourceError { ///

The provided Amazon Resource Name (ARN) is invalid.

InvalidArn(String), - ///

Could not fine the referenced resource. Only state machine and activity ARNs are supported.

+ ///

Could not find the referenced resource. Only state machine and activity ARNs are supported.

ResourceNotFound(String), } @@ -2105,13 +2148,13 @@ impl Error for UpdateStateMachineError { } /// Trait representing the capabilities of the AWS SFN API. AWS SFN clients implement this trait. pub trait StepFunctions { - ///

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

+ ///

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateActivity is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateActivity's idempotency check is based on the activity name. If a following request has different tags values, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, tags will not be updated, even if they are different.

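A hedged sketch of the poll-and-respond loop this doc comment describes, using the blocking `.sync()` adapter on `RusotoFuture`; the activity ARN and worker name are placeholders:

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{
    GetActivityTaskInput, SendTaskSuccessInput, StepFunctions, StepFunctionsClient,
};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    // GetActivityTask long-polls (up to about a minute) until work is available.
    let task = client
        .get_activity_task(GetActivityTaskInput {
            activity_arn: "arn:aws:states:us-east-1:123456789012:activity:example".to_owned(),
            worker_name: Some("worker-1".to_owned()),
        })
        .sync()
        .expect("polling failed");
    if let Some(token) = task.task_token {
        // Do the real work here, then respond with one of the SendTask* actions.
        client
            .send_task_success(SendTaskSuccessInput {
                task_token: token,
                output: r#"{"result":"ok"}"#.to_owned(),
            })
            .sync()
            .expect("send_task_success failed");
    }
}
```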
fn create_activity( &self, input: CreateActivityInput, ) -> RusotoFuture; - ///

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

+ ///

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name and definition. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

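For illustration, a minimal sketch of calling this operation with a one-state Amazon States Language definition; the IAM role ARN is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{CreateStateMachineInput, StepFunctions, StepFunctionsClient};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    // A single Pass state: the simplest valid state machine definition.
    let definition = r#"{"StartAt":"Hello","States":{"Hello":{"Type":"Pass","End":true}}}"#;
    let out = client
        .create_state_machine(CreateStateMachineInput {
            name: "hello-world".to_owned(),
            definition: definition.to_owned(),
            role_arn: "arn:aws:iam::123456789012:role/StepFunctionsRole".to_owned(),
            ..Default::default()
        })
        .sync()
        .expect("create_state_machine failed");
    println!("created: {}", out.state_machine_arn);
}
```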
fn create_state_machine( &self, input: CreateStateMachineInput, @@ -2183,25 +2226,25 @@ pub trait StepFunctions { input: ListStateMachinesInput, ) -> RusotoFuture; - ///

List tags for a given resource.

+ ///

List tags for a given resource.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

fn list_tags_for_resource( &self, input: ListTagsForResourceInput, ) -> RusotoFuture; - ///

Used by workers to report that the task identified by the taskToken failed.

+ ///

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken failed.

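A short sketch of reporting a failure for a callback task; the error and cause strings are illustrative:

```rust
use rusoto_stepfunctions::{SendTaskFailureInput, StepFunctions, StepFunctionsClient};

fn report_failure(client: &StepFunctionsClient, task_token: String) {
    client
        .send_task_failure(SendTaskFailureInput {
            task_token,
            error: Some("DownstreamError".to_owned()),
            cause: Some("downstream service returned HTTP 500".to_owned()),
        })
        .sync()
        .expect("send_task_failure failed");
}
```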
fn send_task_failure( &self, input: SendTaskFailureInput, ) -> RusotoFuture; - ///

Used by workers to report to the service that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition. This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut event.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received.

This operation is only useful for long-lived tasks to report the liveliness of the task.

+ ///

Used by activity workers and task states using the callback pattern to report to Step Functions that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition (HeartbeatSeconds). This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut entry for activities, or a TaskTimedOut entry for tasks using the job run or callback pattern.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the timeout interval for heartbeats.

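A sketch of keeping a long-running callback task alive; heartbeats must arrive more often than the `HeartbeatSeconds` configured in the definition:

```rust
use rusoto_stepfunctions::{SendTaskHeartbeatInput, StepFunctions, StepFunctionsClient};

fn keep_alive(client: &StepFunctionsClient, task_token: &str) {
    // Resets the Heartbeat clock without adding an execution history event.
    client
        .send_task_heartbeat(SendTaskHeartbeatInput {
            task_token: task_token.to_owned(),
        })
        .sync()
        .expect("send_task_heartbeat failed");
}
```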
fn send_task_heartbeat( &self, input: SendTaskHeartbeatInput, ) -> RusotoFuture; - ///

Used by workers to report that the task identified by the taskToken completed successfully.

+ ///

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken completed successfully.

fn send_task_success( &self, input: SendTaskSuccessInput, @@ -2219,7 +2262,7 @@ pub trait StepFunctions { input: StopExecutionInput, ) -> RusotoFuture; - ///

Add a tag to a Step Functions resource.

+ ///

Add a tag to a Step Functions resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

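A minimal sketch of tagging a state machine; the resource ARN and tag values are placeholders:

```rust
use rusoto_core::Region;
use rusoto_stepfunctions::{StepFunctions, StepFunctionsClient, Tag, TagResourceInput};

fn main() {
    let client = StepFunctionsClient::new(Region::UsEast1);
    client
        .tag_resource(TagResourceInput {
            resource_arn: "arn:aws:states:us-east-1:123456789012:stateMachine:hello-world"
                .to_owned(),
            tags: vec![Tag {
                key: Some("team".to_owned()),
                value: Some("platform".to_owned()),
            }],
        })
        .sync()
        .expect("tag_resource failed");
}
```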
fn tag_resource( &self, input: TagResourceInput, @@ -2249,10 +2292,7 @@ impl StepFunctionsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> StepFunctionsClient { - StepFunctionsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2266,15 +2306,19 @@ impl StepFunctionsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - StepFunctionsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> StepFunctionsClient { + StepFunctionsClient { client, region } } } impl StepFunctions for StepFunctionsClient { - ///

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

+ ///

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateActivity is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateActivity's idempotency check is based on the activity name. If a following request has different tags values, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, tags will not be updated, even if they are different.

fn create_activity( &self, input: CreateActivityInput, @@ -2303,7 +2347,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

+ ///

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name and definition. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

fn create_state_machine( &self, input: CreateStateMachineInput, @@ -2652,7 +2696,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

List tags for a given resource.

+ ///

List tags for a given resource.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

fn list_tags_for_resource( &self, input: ListTagsForResourceInput, @@ -2680,7 +2724,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

Used by workers to report that the task identified by the taskToken failed.

+ ///

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken failed.

fn send_task_failure( &self, input: SendTaskFailureInput, @@ -2709,7 +2753,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

Used by workers to report to the service that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition. This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut event.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received.

This operation is only useful for long-lived tasks to report the liveliness of the task.

+ ///

Used by activity workers and task states using the callback pattern to report to Step Functions that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition (HeartbeatSeconds). This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut entry for activities, or a TaskTimedOut entry for tasks using the job run or callback pattern.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the timeout interval for heartbeats.

fn send_task_heartbeat( &self, input: SendTaskHeartbeatInput, @@ -2738,7 +2782,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

Used by workers to report that the task identified by the taskToken completed successfully.

+ ///

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken completed successfully.

fn send_task_success( &self, input: SendTaskSuccessInput, @@ -2825,7 +2869,7 @@ impl StepFunctions for StepFunctionsClient { }) } - ///

Add a tag to a Step Functions resource.

+ ///

Add a tag to a Step Functions resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

fn tag_resource( &self, input: TagResourceInput, diff --git a/rusoto/services/storagegateway/Cargo.toml b/rusoto/services/storagegateway/Cargo.toml index f90d2559cc4..8df8826f75c 100644 --- a/rusoto/services/storagegateway/Cargo.toml +++ b/rusoto/services/storagegateway/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_storagegateway" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/storagegateway/README.md b/rusoto/services/storagegateway/README.md index 418e2e908be..db8b44bcfdd 100644 --- a/rusoto/services/storagegateway/README.md +++ b/rusoto/services/storagegateway/README.md @@ -23,9 +23,16 @@ To use `rusoto_storagegateway` in your application, add it as a dependency in yo ```toml [dependencies] -rusoto_storagegateway = "0.40.0" +rusoto_storagegateway = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/storagegateway/src/custom/mod.rs b/rusoto/services/storagegateway/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/storagegateway/src/custom/mod.rs +++ b/rusoto/services/storagegateway/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/storagegateway/src/generated.rs b/rusoto/services/storagegateway/src/generated.rs index 2fa28052f06..86b286d787d 100644 --- a/rusoto/services/storagegateway/src/generated.rs +++ b/rusoto/services/storagegateway/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -33,7 +32,7 @@ pub struct ActivateGatewayInput { ///

The name you configured for your gateway.

#[serde(rename = "GatewayName")] pub gateway_name: String, - ///

A value that indicates the region where you want to store your data. The gateway region specified must be the same region as the region in your Host header in the request. For more information about available regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

+ ///

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

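A hedged sketch of the activation call; `gateway_region` must match the region of the endpoint the request is sent to. The activation key, gateway name, and gateway type are placeholders:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{ActivateGatewayInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let out = client
        .activate_gateway(ActivateGatewayInput {
            activation_key: "<key from the gateway appliance>".to_owned(),
            gateway_name: "my-gateway".to_owned(),
            // Must be the same AWS Region as the endpoint (Host header) above.
            gateway_region: "us-east-1".to_owned(),
            gateway_timezone: "GMT-5:00".to_owned(),
            gateway_type: Some("FILE_S3".to_owned()),
            ..Default::default()
        })
        .sync()
        .expect("activate_gateway failed");
    println!("gateway ARN: {:?}", out.gateway_arn);
}
```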
#[serde(rename = "GatewayRegion")] pub gateway_region: String, ///

A value that indicates the time zone you want to set for the gateway. The time zone is of the format "GMT-hr:mm" or "GMT+hr:mm". For example, GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule.

@@ -47,7 +46,7 @@ pub struct ActivateGatewayInput { #[serde(rename = "MediumChangerType")] #[serde(skip_serializing_if = "Option::is_none")] pub medium_changer_type: Option, - ///

A list of up to 50 tags that can be assigned to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

+ ///

A list of up to 50 tags that you can assign to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that can be represented in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -57,9 +56,9 @@ pub struct ActivateGatewayInput { pub tape_drive_type: Option, } -///

AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.

+///

AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and AWS Region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivateGatewayOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -76,7 +75,7 @@ pub struct AddCacheInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddCacheOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -96,7 +95,7 @@ pub struct AddTagsToResourceInput { ///

AddTagsToResourceOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddTagsToResourceOutput { ///

The Amazon Resource Name (ARN) of the resource you want to add tags to.

#[serde(rename = "ResourceARN")] @@ -114,7 +113,7 @@ pub struct AddUploadBufferInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddUploadBufferOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -133,7 +132,7 @@ pub struct AddWorkingStorageInput { ///

A JSON object containing the Amazon Resource Name (ARN) of the gateway for which working storage was configured.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddWorkingStorageOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -151,7 +150,7 @@ pub struct AssignTapePoolInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssignTapePoolOutput { ///

The unique Amazon Resource Name (ARN) of the virtual tape that was added to the tape pool.

#[serde(rename = "TapeARN")] @@ -183,7 +182,7 @@ pub struct AttachVolumeInput { ///

AttachVolumeOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AttachVolumeOutput { ///

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name for the initiator that was used to connect to the target.

#[serde(rename = "TargetARN")] @@ -197,7 +196,7 @@ pub struct AttachVolumeOutput { ///

Describes an iSCSI cached volume.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CachediSCSIVolume { ///

The date the volume was created. Volumes created prior to March 28, 2017 don’t have this time stamp.

#[serde(rename = "CreatedDate")] @@ -264,7 +263,7 @@ pub struct CancelArchivalInput { ///

CancelArchivalOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelArchivalOutput { ///

The Amazon Resource Name (ARN) of the virtual tape for which archiving was canceled.

#[serde(rename = "TapeARN")] @@ -284,7 +283,7 @@ pub struct CancelRetrievalInput { ///

CancelRetrievalOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelRetrievalOutput { ///

The Amazon Resource Name (ARN) of the virtual tape for which retrieval was canceled.

#[serde(rename = "TapeARN")] @@ -294,7 +293,7 @@ pub struct CancelRetrievalOutput { ///

Describes Challenge-Handshake Authentication Protocol (CHAP) information that supports authentication between your gateway and iSCSI initiators.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChapInfo { ///

The iSCSI initiator that connects to the target.

#[serde(rename = "InitiatorName")] @@ -340,7 +339,7 @@ pub struct CreateCachediSCSIVolumeInput { #[serde(rename = "SourceVolumeARN")] #[serde(skip_serializing_if = "Option::is_none")] pub source_volume_arn: Option, - ///

A list of up to 50 tags that can be assigned to a cached volume. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

+ ///

A list of up to 50 tags that you can assign to a cached volume. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that you can represent in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

#[serde(rename = "Tags")] #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option>, @@ -353,7 +352,7 @@ pub struct CreateCachediSCSIVolumeInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCachediSCSIVolumeOutput { ///

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.

#[serde(rename = "TargetARN")] @@ -428,7 +427,7 @@ pub struct CreateNFSFileShareInput { ///

CreateNFSFileShareOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNFSFileShareOutput { ///

The Amazon Resource Name (ARN) of the newly created file share.

#[serde(rename = "FileShareARN")] @@ -439,7 +438,7 @@ pub struct CreateNFSFileShareOutput { ///

CreateSMBFileShareInput

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateSMBFileShareInput { - ///

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

+ ///

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

#[serde(rename = "AdminUserList")] #[serde(skip_serializing_if = "Option::is_none")] pub admin_user_list: Option>, @@ -507,7 +506,7 @@ pub struct CreateSMBFileShareInput { ///

CreateSMBFileShareOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSMBFileShareOutput { ///

The Amazon Resource Name (ARN) of the newly created file share.

#[serde(rename = "FileShareARN")] @@ -520,13 +519,17 @@ pub struct CreateSnapshotFromVolumeRecoveryPointInput { ///

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

#[serde(rename = "SnapshotDescription")] pub snapshot_description: String, + ///

A list of up to 50 tags that can be assigned to a snapshot. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to retrieve the TargetARN for the specified VolumeARN.

#[serde(rename = "VolumeARN")] pub volume_arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSnapshotFromVolumeRecoveryPointOutput { ///

The ID of the snapshot.

#[serde(rename = "SnapshotId")] @@ -559,7 +562,7 @@ pub struct CreateSnapshotInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSnapshotOutput { ///

The snapshot ID that is used to refer to the snapshot in future operations such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots) or creating a volume from a snapshot (CreateStorediSCSIVolume).

#[serde(rename = "SnapshotId")] @@ -608,7 +611,7 @@ pub struct CreateStorediSCSIVolumeInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateStorediSCSIVolumeOutput { ///

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.

#[serde(rename = "TargetARN")] @@ -627,7 +630,7 @@ pub struct CreateStorediSCSIVolumeOutput { ///

CreateTapeWithBarcodeInput

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateTapeWithBarcodeInput { - ///

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and region.

+ ///

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

#[serde(rename = "GatewayARN")] pub gateway_arn: String, ///

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

@@ -656,7 +659,7 @@ pub struct CreateTapeWithBarcodeInput { ///

CreateTapeOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTapeWithBarcodeOutput { ///

A unique Amazon Resource Name (ARN) that represents the virtual tape that was created.

#[serde(rename = "TapeARN")] @@ -670,7 +673,7 @@ pub struct CreateTapesInput { ///

A unique identifier that you use to retry a request. If you retry a request, use the same ClientToken you specified in the initial request.

Using the same ClientToken prevents creating the tape multiple times.

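To illustrate the idempotency contract, a sketch where a retried call reuses the same `ClientToken`; all identifiers and sizes are placeholders, and the field set is assumed from this crate version:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{CreateTapesInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let input = CreateTapesInput {
        // Reusing this token on retry prevents creating the batch twice.
        client_token: "create-tapes-batch-0001".to_owned(),
        gateway_arn: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678"
            .to_owned(),
        num_tapes_to_create: 2,
        tape_barcode_prefix: "TST".to_owned(),
        tape_size_in_bytes: 107_374_182_400, // 100 GiB
        ..Default::default()
    };
    let out = client.create_tapes(input).sync().expect("create_tapes failed");
    println!("{:?}", out);
}
```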
#[serde(rename = "ClientToken")] pub client_token: String, - ///

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and region.

+ ///

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

#[serde(rename = "GatewayARN")] pub gateway_arn: String, ///

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

@@ -702,7 +705,7 @@ pub struct CreateTapesInput { ///

CreateTapeOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTapesOutput { ///

A list of unique Amazon Resource Names (ARNs) that represent the virtual tapes that were created.

#[serde(rename = "TapeARNs")] @@ -722,7 +725,7 @@ pub struct DeleteBandwidthRateLimitInput { ///

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose bandwidth rate information was deleted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteBandwidthRateLimitOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -742,7 +745,7 @@ pub struct DeleteChapCredentialsInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteChapCredentialsOutput { ///

The iSCSI initiator that connects to the target.

#[serde(rename = "InitiatorName")] @@ -768,7 +771,7 @@ pub struct DeleteFileShareInput { ///

DeleteFileShareOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFileShareOutput { ///

The Amazon Resource Name (ARN) of the deleted file share.

#[serde(rename = "FileShareARN")] @@ -785,7 +788,7 @@ pub struct DeleteGatewayInput { ///

A JSON object containing the ID of the deleted gateway.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGatewayOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -800,7 +803,7 @@ pub struct DeleteSnapshotScheduleInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSnapshotScheduleOutput { ///

The volume whose snapshot schedule was deleted.

#[serde(rename = "VolumeARN")] @@ -818,7 +821,7 @@ pub struct DeleteTapeArchiveInput { ///

DeleteTapeArchiveOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTapeArchiveOutput { ///

The Amazon Resource Name (ARN) of the virtual tape that was deleted from the virtual tape shelf (VTS).

#[serde(rename = "TapeARN")] @@ -829,7 +832,7 @@ pub struct DeleteTapeArchiveOutput { ///

DeleteTapeInput

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteTapeInput { - ///

The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and region.

+ ///

The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

#[serde(rename = "GatewayARN")] pub gateway_arn: String, ///

The Amazon Resource Name (ARN) of the virtual tape to delete.

@@ -839,7 +842,7 @@ pub struct DeleteTapeInput { ///

DeleteTapeOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTapeOutput { ///

The Amazon Resource Name (ARN) of the deleted virtual tape.

#[serde(rename = "TapeARN")] @@ -857,7 +860,7 @@ pub struct DeleteVolumeInput { ///

A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteVolumeOutput { ///

The Amazon Resource Name (ARN) of the storage volume that was deleted. It is the same ARN you provided in the request.

#[serde(rename = "VolumeARN")] @@ -874,7 +877,7 @@ pub struct DescribeBandwidthRateLimitInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeBandwidthRateLimitOutput { ///

The average download bandwidth rate limit in bits per second. This field does not appear in the response if the download rate limit is not set.

#[serde(rename = "AverageDownloadRateLimitInBitsPerSec")] @@ -896,7 +899,7 @@ pub struct DescribeCacheInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCacheOutput { ///

The amount of cache in bytes allocated to a gateway.

#[serde(rename = "CacheAllocatedInBytes")] @@ -936,7 +939,7 @@ pub struct DescribeCachediSCSIVolumesInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCachediSCSIVolumesOutput { ///

An array of objects where each object contains metadata about one cached volume.

#[serde(rename = "CachediSCSIVolumes")] @@ -954,7 +957,7 @@ pub struct DescribeChapCredentialsInput { ///

A JSON object containing the CHAP credentials.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeChapCredentialsOutput { ///

An array of ChapInfo objects that represent CHAP credentials. Each object in the array contains CHAP credential information for one target-initiator pair. If no CHAP credentials are set, an empty array is returned. CHAP credential information is provided in a JSON object with the following fields:

  • InitiatorName: The iSCSI initiator that connects to the target.

  • SecretToAuthenticateInitiator: The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

  • SecretToAuthenticateTarget: The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

  • TargetARN: The Amazon Resource Name (ARN) of the storage volume.

#[serde(rename = "ChapCredentials")] @@ -971,8 +974,12 @@ pub struct DescribeGatewayInformationInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeGatewayInformationOutput { + ///

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the gateway.

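A short sketch of reading the newly added field after this change lands; the gateway ARN is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{
    DescribeGatewayInformationInput, StorageGateway, StorageGatewayClient,
};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let info = client
        .describe_gateway_information(DescribeGatewayInformationInput {
            gateway_arn: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678"
                .to_owned(),
        })
        .sync()
        .expect("describe_gateway_information failed");
    // CloudWatchLogGroupARN is optional and new in this release.
    if let Some(log_group) = info.cloud_watch_log_group_arn {
        println!("gateway logs to: {}", log_group);
    }
}
```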
+ #[serde(rename = "CloudWatchLogGroupARN")] + #[serde(skip_serializing_if = "Option::is_none")] + pub cloud_watch_log_group_arn: Option, ///

The ID of the Amazon EC2 instance that was used to launch the gateway.

#[serde(rename = "Ec2InstanceId")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1035,7 +1042,7 @@ pub struct DescribeMaintenanceStartTimeInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeMaintenanceStartTimeOutput { ///

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

This value is only available for tape and volume gateways.

#[serde(rename = "DayOfMonth")] @@ -1072,7 +1079,7 @@ pub struct DescribeNFSFileSharesInput { ///

DescribeNFSFileSharesOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeNFSFileSharesOutput { ///

An array containing a description for each requested file share.

#[serde(rename = "NFSFileShareInfoList")] @@ -1090,7 +1097,7 @@ pub struct DescribeSMBFileSharesInput { ///

DescribeSMBFileSharesOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSMBFileSharesOutput { ///

An array containing a description for each requested file share.

#[serde(rename = "SMBFileShareInfoList")] @@ -1105,7 +1112,7 @@ pub struct DescribeSMBSettingsInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSMBSettingsOutput { ///

The name of the domain that the gateway is joined to.

#[serde(rename = "DomainName")] @@ -1118,7 +1125,7 @@ pub struct DescribeSMBSettingsOutput { #[serde(rename = "SMBGuestPasswordSet")] #[serde(skip_serializing_if = "Option::is_none")] pub smb_guest_password_set: Option, - ///

The type of security strategy that was specified for file gateway.

ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, SMB encryption is offered but not required.

MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption is offered but not required.

MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, SMB encryption is required.

+ ///

The type of security strategy that was specified for file gateway.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

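The three strategies map to plain strings on the wire; a sketch of inspecting them via DescribeSMBSettings (the gateway ARN is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{DescribeSMBSettingsInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let settings = client
        .describe_smb_settings(DescribeSMBSettingsInput {
            gateway_arn: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678"
                .to_owned(),
        })
        .sync()
        .expect("describe_smb_settings failed");
    match settings.smb_security_strategy.as_ref().map(String::as_str) {
        Some("ClientSpecified") => println!("negotiated by the client (max compatibility)"),
        Some("MandatorySigning") => println!("SMBv2/SMBv3 clients with signing only"),
        Some("MandatoryEncryption") => println!("SMBv3 clients with encryption only"),
        other => println!("unknown strategy: {:?}", other),
    }
}
```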
#[serde(rename = "SMBSecurityStrategy")] #[serde(skip_serializing_if = "Option::is_none")] pub smb_security_strategy: Option, @@ -1133,7 +1140,7 @@ pub struct DescribeSnapshotScheduleInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeSnapshotScheduleOutput { ///

The snapshot description.

#[serde(rename = "Description")] @@ -1147,6 +1154,10 @@ pub struct DescribeSnapshotScheduleOutput { #[serde(rename = "StartAt")] #[serde(skip_serializing_if = "Option::is_none")] pub start_at: Option, + ///

A list of up to 50 tags assigned to the snapshot schedule, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ListTagsForResource API operation.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, ///

A value that indicates the time zone of the gateway.

#[serde(rename = "Timezone")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1166,7 +1177,7 @@ pub struct DescribeStorediSCSIVolumesInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeStorediSCSIVolumesOutput { ///

Describes a single unit of output from DescribeStorediSCSIVolumes. The following fields are returned:

  • ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI target.

  • LunNumber: The logical disk number.

  • NetworkInterfaceId: The network interface ID of the stored volume that the initiator uses to map the stored volume as an iSCSI target.

  • NetworkInterfacePort: The port used to communicate with iSCSI targets.

  • PreservedExistingData: Indicates whether existing data on the underlying local disk was preserved when the stored volume was created.

  • SourceSnapshotId: If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.

  • StorediSCSIVolumes: An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.

  • TargetARN: The Amazon Resource Name (ARN) of the volume target.

  • VolumeARN: The Amazon Resource Name (ARN) of the stored volume.

  • VolumeDiskId: The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.

  • VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.

  • VolumeiSCSIAttributes: A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.

  • VolumeProgress: The percentage complete if the volume is restoring or bootstrapping, representing the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.

  • VolumeSizeInBytes: The size of the volume in bytes.

  • VolumeStatus: One of the VolumeStatus values that indicates the state of the volume.

  • VolumeType: One of the enumeration values describing the type of the volume. Currently, only STORED volumes are supported.

#[serde(rename = "StorediSCSIVolumes")] @@ -1193,7 +1204,7 @@ pub struct DescribeTapeArchivesInput { ///

DescribeTapeArchivesOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTapeArchivesOutput { ///

An opaque string that indicates the position at which the virtual tapes that were fetched for description ended. Use this marker in your next request to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If there are no more virtual tapes to describe, this field does not appear in the response.

#[serde(rename = "Marker")] @@ -1222,7 +1233,7 @@ pub struct DescribeTapeRecoveryPointsInput { ///

DescribeTapeRecoveryPointsOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTapeRecoveryPointsOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1258,7 +1269,7 @@ pub struct DescribeTapesInput { ///

DescribeTapesOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTapesOutput { ///

An opaque string which can be used as part of a subsequent DescribeTapes call to retrieve the next page of results.

If a response does not contain a marker, then there are no more results to be retrieved.

#[serde(rename = "Marker")] @@ -1277,7 +1288,7 @@ pub struct DescribeUploadBufferInput { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUploadBufferOutput { ///

An array of the gateway's local disk IDs that are configured as upload buffer. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as upload buffer, then the DiskIds array is empty.

#[serde(rename = "DiskIds")] @@ -1317,7 +1328,7 @@ pub struct DescribeVTLDevicesInput { ///

DescribeVTLDevicesOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeVTLDevicesOutput { #[serde(rename = "GatewayARN")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1341,7 +1352,7 @@ pub struct DescribeWorkingStorageInput { ///

A JSON object containing the following fields:

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkingStorageOutput { ///

An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.

#[serde(rename = "DiskIds")] @@ -1374,7 +1385,7 @@ pub struct DetachVolumeInput { ///

DetachVolumeOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DetachVolumeOutput { ///

The Amazon Resource Name (ARN) of the volume that was detached.

#[serde(rename = "VolumeARN")] @@ -1384,7 +1395,7 @@ pub struct DetachVolumeOutput { ///

Lists iSCSI information about a VTL device.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceiSCSIAttributes { ///

Indicates whether mutual CHAP is enabled for the iSCSI target.

#[serde(rename = "ChapEnabled")] @@ -1413,7 +1424,7 @@ pub struct DisableGatewayInput { ///

DisableGatewayOutput

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisableGatewayOutput { ///

The unique Amazon Resource Name (ARN) of the disabled gateway.

#[serde(rename = "GatewayARN")] @@ -1423,7 +1434,7 @@ pub struct DisableGatewayOutput { ///

 /// <p>Represents a gateway's local disk.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Disk {
     /// <p>The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.</p>
     #[serde(rename = "DiskAllocationResource")]
@@ -1459,7 +1470,7 @@ pub struct Disk {
 /// <p>Describes a file share.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct FileShareInfo {
     #[serde(rename = "FileShareARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1480,7 +1491,7 @@ pub struct FileShareInfo {
 /// <p>Describes a gateway object.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GatewayInfo {
     /// <p>The ID of the Amazon EC2 instance that was used to launch the gateway.</p>
     #[serde(rename = "Ec2InstanceId")]
@@ -1490,7 +1501,7 @@ pub struct GatewayInfo {
     #[serde(rename = "Ec2InstanceRegion")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ec_2_instance_region: Option<String>,
-    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.</p>
+    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.</p>
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub gateway_arn: Option<String>,
@@ -1522,10 +1533,10 @@ pub struct JoinDomainInput {
     /// <p>The name of the domain that you want the gateway to join.</p>
     #[serde(rename = "DomainName")]
     pub domain_name: String,
-    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.</p>
+    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.</p>
     #[serde(rename = "GatewayARN")]
     pub gateway_arn: String,
-    /// <p>The organizational unit (OU) is a container with an Active Directory that can hold users, groups, computers, and other OUs and this parameter specifies the OU that the gateway will join within the AD domain.</p>
+    /// <p>The organizational unit (OU) is a container in an Active Directory that can hold users, groups, computers, and other OUs and this parameter specifies the OU that the gateway will join within the AD domain.</p>
     #[serde(rename = "OrganizationalUnit")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub organizational_unit: Option<String>,
@@ -1539,7 +1550,7 @@ pub struct JoinDomainInput {
 /// <p>JoinDomainOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct JoinDomainOutput {
     /// <p>The unique Amazon Resource Name (ARN) of the gateway that joined the domain.</p>
     #[serde(rename = "GatewayARN")]
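The corrected OU doc comment above clarifies that `OrganizationalUnit` targets a container *in* an Active Directory. A hedged sketch of a domain-join request, assuming `JoinDomainInput` derives `Default` like the other generated inputs here; the ARN, domain, and OU strings are placeholders:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{JoinDomainInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let input = JoinDomainInput {
        domain_name: "corp.example.com".to_owned(),
        gateway_arn: "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B".to_owned(),
        // Join inside a specific OU rather than the directory root.
        organizational_unit: Some("OU=StorageGateways,DC=corp,DC=example,DC=com".to_owned()),
        // Remaining fields (directory credentials, etc.) left defaulted in this sketch.
        ..Default::default()
    };
    match client.join_domain(input).sync() {
        Ok(out) => println!("joined: {:?}", out.gateway_arn),
        Err(e) => eprintln!("join failed: {}", e),
    }
}
```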
@@ -1566,7 +1577,7 @@ pub struct ListFileSharesInput {
 /// <p>ListFileShareOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListFileSharesOutput {
     /// <p>An array of information about the file gateway's file shares.</p>
     #[serde(rename = "FileShareInfoList")]
@@ -1596,7 +1607,7 @@ pub struct ListGatewaysInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListGatewaysOutput {
     /// <p>An array of GatewayInfo objects.</p>
     #[serde(rename = "Gateways")]
@@ -1616,7 +1627,7 @@ pub struct ListLocalDisksInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListLocalDisksOutput {
     /// <p>A JSON object containing the following fields:</p>
     #[serde(rename = "Disks")]
@@ -1645,7 +1656,7 @@ pub struct ListTagsForResourceInput {
 /// <p>ListTagsForResourceOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTagsForResourceOutput {
     /// <p>An opaque string that indicates the position at which to stop returning the list of tags.</p>
     #[serde(rename = "Marker")]
@@ -1679,7 +1690,7 @@ pub struct ListTapesInput {
 /// <p>A JSON object containing the following fields:</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTapesOutput {
     /// <p>A string that indicates the position at which to begin returning the next list of tapes. Use the marker in your next request to continue pagination of tapes. If there are no more tapes to list, this element does not appear in the response body.</p>
     #[serde(rename = "Marker")]
@@ -1700,7 +1711,7 @@ pub struct ListVolumeInitiatorsInput {
 /// <p>ListVolumeInitiatorsOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListVolumeInitiatorsOutput {
     /// <p>The host names and port numbers of all iSCSI initiators that are connected to the gateway.</p>
     #[serde(rename = "Initiators")]
@@ -1715,7 +1726,7 @@ pub struct ListVolumeRecoveryPointsInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListVolumeRecoveryPointsOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1744,7 +1755,7 @@ pub struct ListVolumesInput {
 /// <p>A JSON object containing the following fields:</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListVolumesOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1782,7 +1793,7 @@ pub struct NFSFileShareDefaults {
 /// <p>The Unix file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NFSFileShareInfo {
     #[serde(rename = "ClientList")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1848,7 +1859,7 @@ pub struct NFSFileShareInfo {
 /// <p>Describes a gateway's network interface.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NetworkInterface {
     /// <p>The Internet Protocol version 4 (IPv4) address of the interface.</p>
     #[serde(rename = "Ipv4Address")]
@@ -1871,7 +1882,7 @@ pub struct NotifyWhenUploadedInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct NotifyWhenUploadedOutput {
     #[serde(rename = "FileShareARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1899,7 +1910,7 @@ pub struct RefreshCacheInput {
 /// <p>RefreshCacheOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RefreshCacheOutput {
     #[serde(rename = "FileShareARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1922,7 +1933,7 @@ pub struct RemoveTagsFromResourceInput {
 /// <p>RemoveTagsFromResourceOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RemoveTagsFromResourceOutput {
     /// <p>The Amazon Resource Name (ARN) of the resource that the tags were removed from.</p>
     #[serde(rename = "ResourceARN")]
@@ -1937,7 +1948,7 @@ pub struct ResetCacheInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ResetCacheOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1947,7 +1958,7 @@ pub struct ResetCacheOutput {
 /// <p>RetrieveTapeArchiveInput</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct RetrieveTapeArchiveInput {
-    /// <p>The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and region.</p> <p>You retrieve archived virtual tapes to only one gateway and the gateway must be a tape gateway.</p>
+    /// <p>The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and AWS Region.</p> <p>You retrieve archived virtual tapes to only one gateway and the gateway must be a tape gateway.</p>
     #[serde(rename = "GatewayARN")]
     pub gateway_arn: String,
     /// <p>The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from the virtual tape shelf (VTS).</p>
@@ -1957,7 +1968,7 @@ pub struct RetrieveTapeArchiveInput {
 /// <p>RetrieveTapeArchiveOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RetrieveTapeArchiveOutput {
     /// <p>The Amazon Resource Name (ARN) of the retrieved virtual tape.</p>
     #[serde(rename = "TapeARN")]
@@ -1977,7 +1988,7 @@ pub struct RetrieveTapeRecoveryPointInput {
 /// <p>RetrieveTapeRecoveryPointOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RetrieveTapeRecoveryPointOutput {
     /// <p>The Amazon Resource Name (ARN) of the virtual tape for which the recovery point was retrieved.</p>
     #[serde(rename = "TapeARN")]
@@ -1987,7 +1998,7 @@ pub struct RetrieveTapeRecoveryPointOutput {
 /// <p>The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported for file gateways.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SMBFileShareInfo {
     /// <p>A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.</p>
     #[serde(rename = "AdminUserList")]
@@ -2073,7 +2084,7 @@ pub struct SetLocalConsolePasswordInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SetLocalConsolePasswordOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2092,7 +2103,7 @@ pub struct SetSMBGuestPasswordInput {
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SetSMBGuestPasswordOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2108,7 +2119,7 @@ pub struct ShutdownGatewayInput {
 /// <p>A JSON object containing the of the gateway that was shut down.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ShutdownGatewayOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2124,7 +2135,7 @@ pub struct StartGatewayInput {
 /// <p>A JSON object containing the of the gateway that was restarted.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartGatewayOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2142,7 +2153,7 @@ pub struct StorageGatewayError {
 /// <p>Describes an iSCSI stored volume.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StorediSCSIVolume {
     /// <p>The date the volume was created. Volumes created prior to March 28, 2017 don’t have this time stamp.</p>
     #[serde(rename = "CreatedDate")]
@@ -2218,7 +2229,7 @@ pub struct Tag {
 /// <p>Describes a virtual tape object.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Tape {
     #[serde(rename = "KMSKey")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2263,7 +2274,7 @@ pub struct Tape {
 /// <p>Represents a virtual tape that is archived in the virtual tape shelf (VTS).</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TapeArchive {
     /// <p>The time that the archiving of the virtual tape was completed.</p> <p>The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.</p>
     #[serde(rename = "CompletionTime")]
@@ -2308,9 +2319,9 @@ pub struct TapeArchive {
 /// <p>Describes a virtual tape.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TapeInfo {
-    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.</p>
+    /// <p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.</p>
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub gateway_arn: Option<String>,
@@ -2338,7 +2349,7 @@ pub struct TapeInfo {
 /// <p>Describes a recovery point.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TapeRecoveryPointInfo {
     /// <p>The Amazon Resource Name (ARN) of the virtual tape.</p>
     #[serde(rename = "TapeARN")]
@@ -2375,7 +2386,7 @@ pub struct UpdateBandwidthRateLimitInput {
 /// <p>A JSON object containing the of the gateway whose throttle information was updated.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateBandwidthRateLimitOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2402,7 +2413,7 @@ pub struct UpdateChapCredentialsInput {
 /// <p>A JSON object containing the following fields:</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateChapCredentialsOutput {
     /// <p>The iSCSI initiator that connects to the target. This is the same initiator name specified in the request.</p>
     #[serde(rename = "InitiatorName")]
@@ -2416,6 +2427,10 @@ pub struct UpdateChapCredentialsOutput {
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateGatewayInformationInput {
+    /// <p>The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.</p> <p>For more information, see What Is Amazon CloudWatch Logs?.</p>
+    #[serde(rename = "CloudWatchLogGroupARN")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub cloud_watch_log_group_arn: Option<String>,
     #[serde(rename = "GatewayARN")]
     pub gateway_arn: String,
     #[serde(rename = "GatewayName")]
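The hunk above adds an optional `CloudWatchLogGroupARN` field to `UpdateGatewayInformationInput`, letting callers point a gateway's event logging at a CloudWatch log group. A hedged sketch of using the new field, assuming the matching `update_gateway_information` method on the `StorageGateway` trait; both ARNs are placeholders:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{StorageGateway, StorageGatewayClient, UpdateGatewayInformationInput};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let input = UpdateGatewayInformationInput {
        gateway_arn: "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B".to_owned(),
        // New in this release: wire gateway events to a CloudWatch log group.
        cloud_watch_log_group_arn: Some(
            "arn:aws:logs:us-east-1:111122223333:log-group:my-gateway-logs".to_owned(),
        ),
        ..Default::default()
    };
    // RusotoFuture can be driven synchronously with `.sync()` in this release.
    match client.update_gateway_information(input).sync() {
        Ok(out) => println!("updated: {:?}", out.gateway_arn),
        Err(e) => eprintln!("update failed: {}", e),
    }
}
```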
@@ -2429,7 +2444,7 @@ pub struct UpdateGatewayInformationInput {
 /// <p>A JSON object containing the ARN of the gateway that was updated.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateGatewayInformationOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2449,7 +2464,7 @@ pub struct UpdateGatewaySoftwareNowInput {
 /// <p>A JSON object containing the of the gateway that was updated.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateGatewaySoftwareNowOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2479,7 +2494,7 @@ pub struct UpdateMaintenanceStartTimeInput {
 /// <p>A JSON object containing the of the gateway whose maintenance start time is updated.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateMaintenanceStartTimeOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2536,7 +2551,7 @@ pub struct UpdateNFSFileShareInput {
 /// <p>UpdateNFSFileShareOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateNFSFileShareOutput {
     /// <p>The Amazon Resource Name (ARN) of the updated file share.</p>
     #[serde(rename = "FileShareARN")]
@@ -2547,7 +2562,7 @@ pub struct UpdateNFSFileShareOutput {
 /// <p>UpdateSMBFileShareInput</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateSMBFileShareInput {
-    /// <p>A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.</p>
+    /// <p>A list of users in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.</p>
     #[serde(rename = "AdminUserList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub admin_user_list: Option<Vec<String>>,
@@ -2598,7 +2613,7 @@ pub struct UpdateSMBFileShareInput {
 /// <p>UpdateSMBFileShareOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateSMBFileShareOutput {
     /// <p>The Amazon Resource Name (ARN) of the updated SMB file share.</p>
     #[serde(rename = "FileShareARN")]
@@ -2610,13 +2625,13 @@ pub struct UpdateSMBFileShareOutput {
 pub struct UpdateSMBSecurityStrategyInput {
     #[serde(rename = "GatewayARN")]
     pub gateway_arn: String,
-    /// <p>Specifies the type of security strategy.</p> <p>ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, SMB encryption is offered but not required.</p> <p>MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption is offered but not required.</p> <p>MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, SMB encryption is required.</p>
+    /// <p>Specifies the type of security strategy.</p> <p>ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.</p> <p>MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.</p> <p>MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.</p>
     #[serde(rename = "SMBSecurityStrategy")]
     pub smb_security_strategy: String,
 }
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateSMBSecurityStrategyOutput {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
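The rewritten doc comment above pins down what each strategy value actually enforces (client-negotiated, mandatory SMBv2/v3 signing, or mandatory SMBv3 encryption). As a quick orientation, a sketch of selecting the strictest strategy with the two fields shown in the struct; the gateway ARN is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{StorageGateway, StorageGatewayClient, UpdateSMBSecurityStrategyInput};

fn main() {
    let client = StorageGatewayClient::new(Region::UsWest2);
    let input = UpdateSMBSecurityStrategyInput {
        gateway_arn: "arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B".to_owned(),
        // Strictest option: only SMBv3 clients with encryption enabled may connect.
        smb_security_strategy: "MandatoryEncryption".to_owned(),
    };
    if let Err(e) = client.update_smb_security_strategy(input).sync() {
        eprintln!("failed to update SMB security strategy: {}", e);
    }
}
```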
@@ -2647,7 +2662,7 @@ pub struct UpdateSnapshotScheduleInput {
 /// <p>A JSON object containing the of the updated storage volume.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateSnapshotScheduleOutput {
     /// <p>The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.</p>
     #[serde(rename = "VolumeARN")]
@@ -2667,7 +2682,7 @@ pub struct UpdateVTLDeviceTypeInput {
 /// <p>UpdateVTLDeviceTypeOutput</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateVTLDeviceTypeOutput {
     /// <p>The Amazon Resource Name (ARN) of the medium changer you have selected.</p>
     #[serde(rename = "VTLDeviceARN")]
@@ -2677,7 +2692,7 @@ pub struct UpdateVTLDeviceTypeOutput {
 /// <p>Represents a device object associated with a tape gateway.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VTLDevice {
     /// <p>A list of iSCSI information about a VTL device.</p>
     #[serde(rename = "DeviceiSCSIAttributes")]
@@ -2703,7 +2718,7 @@ pub struct VTLDevice {
 /// <p>Describes a storage volume object.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VolumeInfo {
     #[serde(rename = "GatewayARN")]
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -2736,7 +2751,7 @@ pub struct VolumeInfo {
 /// <p>Describes a storage volume recovery point object.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VolumeRecoveryPointInfo {
     /// <p>The Amazon Resource Name (ARN) of the volume target.</p>
     #[serde(rename = "VolumeARN")]
@@ -2758,7 +2773,7 @@ pub struct VolumeRecoveryPointInfo {
 /// <p>Lists iSCSI information about a volume.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VolumeiSCSIAttributes {
     /// <p>Indicates whether mutual CHAP is enabled for the iSCSI target.</p>
     #[serde(rename = "ChapEnabled")]
@@ -5871,7 +5886,7 @@ impl Error for UpdateVTLDeviceTypeError {
 }
 /// Trait representing the capabilities of the AWS Storage Gateway API. AWS Storage Gateway clients implement this trait.
 pub trait StorageGateway {
-    /// <p>Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the region you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.</p> <p>You must turn on the gateway VM before you can activate your gateway.</p>
+    /// <p>Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.</p> <p>You must turn on the gateway VM before you can activate your gateway.</p>
     fn activate_gateway(
         &self,
         input: ActivateGatewayInput,
@@ -5928,7 +5943,7 @@ pub trait StorageGateway {
         input: CreateCachediSCSIVolumeInput,
     ) -> RusotoFuture<CreateCachediSCSIVolumeOutput, CreateCachediSCSIVolumeError>;
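The activation doc above names the pieces a caller must supply: an activation key from the gateway VM, a gateway name, the AWS Region for stored snapshots or tapes, and a time zone for the snapshot schedule. A hedged sketch of that call, assuming `ActivateGatewayInput` derives `Default` like the other generated inputs; every string value is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{ActivateGatewayInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let input = ActivateGatewayInput {
        activation_key: "ABCDE-12345-FGHIJ-67890-KLMNO".to_owned(), // obtained from the gateway VM
        gateway_name: "example-gateway".to_owned(),
        gateway_region: "us-east-1".to_owned(),  // where snapshots/tapes are stored
        gateway_timezone: "GMT-5:00".to_owned(), // used for the snapshot schedule window
        ..Default::default()
    };
    match client.activate_gateway(input).sync() {
        Ok(out) => println!("activated gateway: {:?}", out.gateway_arn),
        Err(e) => eprintln!("activation failed: {}", e),
    }
}
```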
-    /// <p>Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.</p> <p>File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.</p> <p>File gateway does not support creating hard or symbolic links on a file share.</p>
+    /// <p>Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.</p> <p>File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.</p> <p>File gateway does not support creating hard or symbolic links on a file share.</p>
     fn create_nfs_file_share(
         &self,
         input: CreateNFSFileShareInput,
@@ -6147,7 +6162,7 @@ pub trait StorageGateway {
         input: ListFileSharesInput,
     ) -> RusotoFuture<ListFileSharesOutput, ListFileSharesError>;
-    /// <p>Lists gateways owned by an AWS account in a region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).</p> <p>By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.</p> <p>If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.</p>
+    /// <p>Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).</p> <p>By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.</p> <p>If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.</p>
     fn list_gateways(
         &self,
         input: ListGatewaysInput,
@@ -6186,7 +6201,7 @@ pub trait StorageGateway {
         input: ListVolumesInput,
     ) -> RusotoFuture<ListVolumesOutput, ListVolumesError>;
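The doc comment spells out the marker-based pagination contract: a truncated response carries a marker, and a missing marker means the final page. A hedged sketch of draining all pages synchronously, assuming `ListGatewaysInput` exposes `limit` and `marker` and `ListGatewaysOutput` exposes `gateways` and `marker`, as the generated shapes in this file suggest:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{ListGatewaysInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let mut marker: Option<String> = None;
    loop {
        let page = client
            .list_gateways(ListGatewaysInput {
                limit: None, // accept the default page size (up to 100)
                marker: marker.clone(),
            })
            .sync()
            .expect("ListGateways failed");
        for gw in page.gateways.unwrap_or_default() {
            println!("{:?}", gw.gateway_arn);
        }
        // A missing marker signals the final page.
        marker = page.marker;
        if marker.is_none() {
            break;
        }
    }
}
```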
-    /// <p>Sends you notification through CloudWatch Events when all files written to your NFS file share have been uploaded to Amazon S3.</p> <p>AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the NFS file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.</p> <p>For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).</p>
+    /// <p>Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.</p> <p>AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.</p> <p>For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).</p>
     fn notify_when_uploaded(
         &self,
         input: NotifyWhenUploadedInput,
@@ -6288,7 +6303,7 @@ pub trait StorageGateway {
         input: UpdateSMBFileShareInput,
     ) -> RusotoFuture<UpdateSMBFileShareOutput, UpdateSMBFileShareError>;
-    /// <p>Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.</p>
+    /// <p>Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.</p> <p>This API is called Security level in the User Guide.</p> <p>A higher security level can affect performance of the gateway.</p>
     fn update_smb_security_strategy(
         &self,
         input: UpdateSMBSecurityStrategyInput,
     ) -> RusotoFuture<UpdateSMBSecurityStrategyOutput, UpdateSMBSecurityStrategyError>;
@@ -6318,10 +6333,7 @@ impl StorageGatewayClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> StorageGatewayClient {
-        StorageGatewayClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -6335,15 +6347,19 @@ impl StorageGatewayClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        StorageGatewayClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> StorageGatewayClient {
+        StorageGatewayClient { client, region }
     }
 }

 impl StorageGateway for StorageGatewayClient {
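The refactor above routes both existing constructors through a single, now-public `new_with_client`, so callers can inject a preconfigured `rusoto_core::Client`. A sketch of the two entry points; the shared-client variant assumes you want to reuse one `Client` (credentials, dispatcher) across several service clients:

```rust
use rusoto_core::{Client, Region};
use rusoto_storagegateway::StorageGatewayClient;

fn main() {
    // Default credentials provider and TLS client.
    let _default = StorageGatewayClient::new(Region::UsEast1);

    // New in this release: inject a shared `Client` directly.
    let shared = Client::shared();
    let _injected = StorageGatewayClient::new_with_client(shared, Region::UsEast1);
}
```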
-    /// <p>Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the region you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.</p> <p>You must turn on the gateway VM before you can activate your gateway.</p>
+    /// <p>Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.</p> <p>You must turn on the gateway VM before you can activate your gateway.</p>
     fn activate_gateway(
         &self,
         input: ActivateGatewayInput,
@@ -6629,7 +6645,7 @@ impl StorageGateway for StorageGatewayClient {
         })
     }

-    /// <p>Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.</p> <p>File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.</p> <p>File gateway does not support creating hard or symbolic links on a file share.</p>
+    /// <p>Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.</p> <p>File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.</p> <p>File gateway does not support creating hard or symbolic links on a file share.</p>
     fn create_nfs_file_share(
         &self,
         input: CreateNFSFileShareInput,
@@ -7696,7 +7712,7 @@ impl StorageGateway for StorageGatewayClient {
         })
     }

-    /// <p>Lists gateways owned by an AWS account in a region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).</p> <p>By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.</p> <p>If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.</p>
+    /// <p>Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).</p> <p>By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.</p> <p>If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.</p>
     fn list_gateways(
         &self,
         input: ListGatewaysInput,
@@ -7899,7 +7915,7 @@ impl StorageGateway for StorageGatewayClient {
         })
     }

-    /// <p>Sends you notification through CloudWatch Events when all files written to your NFS file share have been uploaded to Amazon S3.</p> <p>AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the NFS file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.</p> <p>For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).</p>
+    /// <p>Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.</p> <p>AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.</p> <p>For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).</p>
     fn notify_when_uploaded(
         &self,
         input: NotifyWhenUploadedInput,
@@ -8400,7 +8416,7 @@ impl StorageGateway for StorageGatewayClient {
         })
     }

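Since the actual "everything is uploaded" signal arrives through a CloudWatch Event rather than the API response, the call itself mainly returns an identifier to correlate with that event. A hedged sketch, assuming the output carries a `NotificationId` field per the service API; the share ARN is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_storagegateway::{NotifyWhenUploadedInput, StorageGateway, StorageGatewayClient};

fn main() {
    let client = StorageGatewayClient::new(Region::UsEast1);
    let input = NotifyWhenUploadedInput {
        file_share_arn: "arn:aws:storagegateway:us-east-1:111122223333:share/share-F00D".to_owned(),
    };
    // The returned notification ID later appears in the CloudWatch Event.
    let out = client.notify_when_uploaded(input).sync().expect("request failed");
    println!("notification id: {:?}", out.notification_id);
}
```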
-    /// <p>Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.</p>
+    /// <p>Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.</p> <p>This API is called Security level in the User Guide.</p> <p>A higher security level can affect performance of the gateway.</p>
     fn update_smb_security_strategy(
         &self,
         input: UpdateSMBSecurityStrategyInput,
diff --git a/rusoto/services/storagegateway/src/lib.rs b/rusoto/services/storagegateway/src/lib.rs
index f562b897c7d..6fdbb03d961 100644
--- a/rusoto/services/storagegateway/src/lib.rs
+++ b/rusoto/services/storagegateway/src/lib.rs
@@ -12,7 +12,7 @@
 // =================================================================
 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//! <p>AWS Storage Gateway Service</p> <p>AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.</p> <p>Use the following links to get started using the AWS Storage Gateway Service API Reference:</p> <p>AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.</p> <p>IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.</p> <p>For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:</p> <p>arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.</p> <p>A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.</p> <p>For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.</p>
+//! <p>AWS Storage Gateway Service</p> <p>AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.</p> <p>Use the following links to get started using the AWS Storage Gateway Service API Reference:</p> <p>AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.</p> <p>IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.</p> <p>For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:</p> <p>arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.</p> <p>A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.</p> <p>For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.</p>
 //!
 //! If you're using the service, you're probably looking for [StorageGatewayClient](struct.StorageGatewayClient.html) and [StorageGateway](trait.StorageGateway.html).
diff --git a/rusoto/services/sts/Cargo.toml b/rusoto/services/sts/Cargo.toml
index d6716b10f35..c31dd1989bc 100644
--- a/rusoto/services/sts/Cargo.toml
+++ b/rusoto/services/sts/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_sts"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -18,18 +18,20 @@ exclude = ["test_resources/*"]
 bytes = "0.4.12"
 chrono = "0.4.0"
 futures = "0.1.16"
-serde_urlencoded = "0.5"
-xml-rs = "0.7"
+serde_urlencoded = "0.6"
+xml-rs = "0.8"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/sts/README.md b/rusoto/services/sts/README.md
index 160ea010975..0a775037473 100644
--- a/rusoto/services/sts/README.md
+++ b/rusoto/services/sts/README.md
@@ -23,9 +23,16 @@ To use `rusoto_sts` in your application, add it as a dependency in your `Cargo.t
 ```toml
 [dependencies]
-rusoto_sts = "0.40.0"
+rusoto_sts = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/sts/src/custom/credential.rs b/rusoto/services/sts/src/custom/credential.rs
index 3a902f6d866..3ecba81911e 100644
--- a/rusoto/services/sts/src/custom/credential.rs
+++ b/rusoto/services/sts/src/custom/credential.rs
@@ -4,8 +4,6 @@ use futures::{Async, Future, Poll};
 use rusoto_core;

-use rusoto_core::credential::AwsCredentials;
-use rusoto_core::{CredentialsError, ProvideAwsCredentials, RusotoFuture};
 use crate::{
     AssumeRoleError, AssumeRoleRequest, AssumeRoleResponse, AssumeRoleWithSAMLError,
     AssumeRoleWithSAMLRequest, AssumeRoleWithSAMLResponse, AssumeRoleWithWebIdentityError,
@@ -16,23 +14,31 @@
     GetFederationTokenResponse, GetSessionTokenError, GetSessionTokenRequest,
     GetSessionTokenResponse, Sts, StsClient,
 };
+use rusoto_core::credential::AwsCredentials;
+use rusoto_core::{CredentialsError, ProvideAwsCredentials, RusotoFuture};

-pub const DEFAULT_DURATION_SECONDS: i32 = 3600;
-pub const DEFAULT_ROLE_DURATION_SECONDS: i32 = 900;
+pub const DEFAULT_DURATION_SECONDS: i64 = 3600;
+pub const DEFAULT_ROLE_DURATION_SECONDS: i64 = 900;

 /// Trait for conversions from STS Credentials to AWS Credentials.
 pub trait NewAwsCredsForStsCreds {
     /// Creates an [AwsCredentials](../rusoto_credential/struct.AwsCredentials.html) from a [Credentials](struct.Credentials.html)
     /// Returns a [CredentialsError](../rusoto_credential/struct.CredentialsError.html) in case of an error.
-    fn new_for_credentials(sts_creds: crate::generated::Credentials) -> Result<AwsCredentials, CredentialsError>;
+    fn new_for_credentials(
+        sts_creds: crate::generated::Credentials,
+    ) -> Result<AwsCredentials, CredentialsError>;
 }

 impl NewAwsCredsForStsCreds for AwsCredentials {
-    fn new_for_credentials(sts_creds: crate::generated::Credentials) -> Result<AwsCredentials, CredentialsError> {
-        let expires_at = Some(sts_creds
-            .expiration
-            .parse::<DateTime<Utc>>()
-            .map_err(CredentialsError::from)?);
+    fn new_for_credentials(
+        sts_creds: crate::generated::Credentials,
+    ) -> Result<AwsCredentials, CredentialsError> {
+        let expires_at = Some(
+            sts_creds
+                .expiration
+                .parse::<DateTime<Utc>>()
+                .map_err(CredentialsError::from)?,
+        );

         Ok(AwsCredentials::new(
             sts_creds.access_key_id,
@@ -163,8 +169,8 @@ impl StsSessionCredentialsProvider {
         StsSessionCredentialsProvider {
             sts_client: Box::new(sts_client),
             session_duration: duration
-                .unwrap_or(Duration::seconds(DEFAULT_DURATION_SECONDS as i64)),
-            mfa_serial: mfa_serial,
+                .unwrap_or_else(|| Duration::seconds(DEFAULT_DURATION_SECONDS)),
+            mfa_serial,
             mfa_code: None,
         }
     }
@@ -189,7 +195,6 @@ impl StsSessionCredentialsProvider {
             serial_number: self.mfa_serial.clone(),
             token_code: self.mfa_code.clone(),
             duration_seconds: Some(self.session_duration.num_seconds() as i64),
-            ..Default::default()
         };
         StsSessionCredentialsProviderFuture {
             inner: self.sts_client.get_session_token(request),
@@ -210,11 +215,9 @@ impl Future for StsSessionCredentialsProviderFuture {
             Ok(Async::Ready(resp)) => {
                 let creds = resp
                     .credentials
-                    .ok_or(CredentialsError::new("no credentials in response"))?;
+                    .ok_or_else(|| CredentialsError::new("no credentials in response"))?;

-                Ok(Async::Ready(AwsCredentials::new_for_credentials(
-                    creds
-                )?))
+                Ok(Async::Ready(AwsCredentials::new_for_credentials(creds)?))
             }
             Ok(Async::NotReady) => Ok(Async::NotReady),
             Err(err) => Err(CredentialsError::new(format!(
@@ -271,13 +274,13 @@ impl StsAssumeRoleSessionCredentialsProvider {
     ) -> StsAssumeRoleSessionCredentialsProvider {
         StsAssumeRoleSessionCredentialsProvider {
             sts_client: Box::new(sts_client),
-            role_arn: role_arn,
-            session_name: session_name,
-            external_id: external_id,
+            role_arn,
+            session_name,
+            external_id,
             session_duration: session_duration
-                .unwrap_or(Duration::seconds(DEFAULT_ROLE_DURATION_SECONDS as i64)),
-            scope_down_policy: scope_down_policy,
-            mfa_serial: mfa_serial,
+                .unwrap_or_else(|| Duration::seconds(DEFAULT_ROLE_DURATION_SECONDS)),
+            scope_down_policy,
+            mfa_serial,
             mfa_code: None,
         }
     }
@@ -327,11 +330,9 @@ impl Future for StsAssumeRoleSessionCredentialsProviderFuture {
             Ok(Async::Ready(resp)) => {
                 let creds = resp
                     .credentials
-                    .ok_or(CredentialsError::new("no credentials in response"))?;
+                    .ok_or_else(|| CredentialsError::new("no credentials in response"))?;

-                Ok(Async::Ready(AwsCredentials::new_for_credentials(
-                    creds
-                )?))
+                Ok(Async::Ready(AwsCredentials::new_for_credentials(creds)?))
             }
             Ok(Async::NotReady) => Ok(Async::NotReady),
             Err(err) => Err(CredentialsError::new(format!(
@@ -384,13 +385,13 @@ impl StsWebIdentityFederationSessionCredentialsProvider {
     ) -> StsWebIdentityFederationSessionCredentialsProvider {
         StsWebIdentityFederationSessionCredentialsProvider {
             sts_client: Box::new(sts_client),
-            wif_token: wif_token,
-            wif_provider: wif_provider,
-            role_arn: role_arn,
-            session_name: session_name,
+            wif_token,
+            wif_provider,
+            role_arn,
+            session_name,
             session_duration: session_duration
-                .unwrap_or(Duration::seconds(DEFAULT_DURATION_SECONDS as i64)),
-            scope_down_policy: scope_down_policy,
+                .unwrap_or_else(|| Duration::seconds(DEFAULT_DURATION_SECONDS)),
+            scope_down_policy,
         }
     }
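These providers wrap the STS calls behind `ProvideAwsCredentials`, so temporary credentials can feed any other Rusoto client. A hedged sketch of assuming a role, with the argument order following the constructor shown above; the role ARN is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient};

fn main() {
    let sts = StsClient::new(Region::UsEast1);
    let provider = StsAssumeRoleSessionCredentialsProvider::new(
        sts,
        "arn:aws:iam::111122223333:role/example-role".to_owned(), // role to assume
        "rusoto-example-session".to_owned(),                      // session name
        None, // external_id
        None, // session_duration; defaults to DEFAULT_ROLE_DURATION_SECONDS
        None, // scope_down_policy
        None, // mfa_serial
    );
    // `provider` can now be passed as the credentials provider when
    // constructing another service client via its `new_with` constructor.
    let _ = provider;
}
```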
@@ -426,7 +427,7 @@ impl Future for StsWebIdentityFederationSessionCredentialsProviderFuture {
             Ok(Async::Ready(resp)) => {
                 let creds = resp
                     .credentials
-                    .ok_or(CredentialsError::new("no credentials in response"))?;
+                    .ok_or_else(|| CredentialsError::new("no credentials in response"))?;

                 let mut aws_creds = AwsCredentials::new_for_credentials(creds)?;
diff --git a/rusoto/services/sts/src/generated.rs b/rusoto/services/sts/src/generated.rs
index 563b90f0ec9..d58b39d4896 100644
--- a/rusoto/services/sts/src/generated.rs
+++ b/rusoto/services/sts/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::param::{Params, ServiceParams};
 use rusoto_core::proto::xml::error::*;
@@ -86,7 +85,7 @@ pub struct AssumeRoleRequest {
     pub external_id: Option<String>,
     /// <p>An IAM policy in JSON format that you want to use as an inline session policy.</p> <p>This parameter is optional. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p> <p>The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII character from the space character to the end of the valid character list (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage return (\u000D) characters.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
     pub policy: Option<String>,
-    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
+    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
     pub policy_arns: Option<Vec<PolicyDescriptorType>>,
     /// <p>The Amazon Resource Name (ARN) of the role to assume.</p>
     pub role_arn: String,
@@ -185,7 +184,7 @@ pub struct AssumeRoleWithSAMLRequest {
     pub duration_seconds: Option<i64>,
     /// <p>An IAM policy in JSON format that you want to use as an inline session policy.</p> <p>This parameter is optional. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p> <p>The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII character from the space character to the end of the valid character list (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage return (\u000D) characters.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
     pub policy: Option<String>,
-    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
+    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
     pub policy_arns: Option<Vec<PolicyDescriptorType>>,
     /// <p>The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.</p>
     pub principal_arn: String,
@@ -308,7 +307,7 @@ pub struct AssumeRoleWithWebIdentityRequest {
     pub duration_seconds: Option<i64>,
     /// <p>An IAM policy in JSON format that you want to use as an inline session policy.</p> <p>This parameter is optional. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p> <p>The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII character from the space character to the end of the valid character list (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage return (\u000D) characters.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
     pub policy: Option<String>,
-    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
+    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as managed session policies. The policies must exist in the same account as the role.</p> <p>This parameter is optional. You can provide up to 10 managed policy ARNs. However, the plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p> <p>Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent AWS API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.</p>
     pub policy_arns: Option<Vec<PolicyDescriptorType>>,
     /// <p>The fully qualified host component of the domain name of the identity provider.</p> <p>Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com and graph.facebook.com are the only supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port numbers.</p> <p>Do not specify this value for OpenID Connect ID tokens.</p>
     pub provider_id: Option<String>,
@@ -637,6 +636,53 @@ impl FederatedUserDeserializer {
     }
 }
 #[derive(Default, Debug, Clone, PartialEq)]
+pub struct GetAccessKeyInfoRequest {
+pub struct GetAccessKeyInfoRequest {
+    /// <p>The identifier of an access key.</p> <p>This parameter allows (through its regex pattern) a string of characters that can consist of any upper- or lowercased letter or digit.</p>
+    pub access_key_id: String,
+}
+
+/// Serialize `GetAccessKeyInfoRequest` contents to a `SignedRequest`.
+struct GetAccessKeyInfoRequestSerializer;
+impl GetAccessKeyInfoRequestSerializer {
+    fn serialize(params: &mut Params, name: &str, obj: &GetAccessKeyInfoRequest) {
+        let mut prefix = name.to_string();
+        if prefix != "" {
+            prefix.push_str(".");
+        }
+
+        params.put(&format!("{}{}", prefix, "AccessKeyId"), &obj.access_key_id);
+    }
+}
+
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct GetAccessKeyInfoResponse {
+    /// <p>The number used to identify the AWS account.</p>
+    pub account: Option<String>,
+}
+
+struct GetAccessKeyInfoResponseDeserializer;
+impl GetAccessKeyInfoResponseDeserializer {
+    #[allow(unused_variables)]
+    fn deserialize<T: Peek + Next>(
+        tag_name: &str,
+        stack: &mut T,
+    ) -> Result<GetAccessKeyInfoResponse, XmlParseError> {
+        deserialize_elements::<_, GetAccessKeyInfoResponse, _>(
+            tag_name,
+            stack,
+            |name, stack, obj| {
+                match name {
+                    "Account" => {
+                        obj.account = Some(AccountTypeDeserializer::deserialize("Account", stack)?);
+                    }
+                    _ => skip_tree(stack),
+                }
+                Ok(())
+            },
+        )
+    }
+}
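For orientation while reviewing (this is not part of the diff): a minimal sketch of how a consumer could exercise the new operation once rusoto_sts 0.41 ships. The access key ID is AWS's documentation example value, and `RusotoFuture::sync()` is used to keep the snippet self-contained.

```rust
use rusoto_core::Region;
use rusoto_sts::{GetAccessKeyInfoRequest, Sts, StsClient};

fn main() {
    let client = StsClient::new(Region::UsEast1);
    let request = GetAccessKeyInfoRequest {
        access_key_id: "AKIAIOSFODNN7EXAMPLE".to_owned(),
    };
    // sync() blocks until the future resolves; an async application would
    // spawn the returned RusotoFuture on a runtime instead.
    match client.get_access_key_info(request).sync() {
        Ok(response) => println!("owning account: {:?}", response.account),
        Err(error) => eprintln!("GetAccessKeyInfo failed: {}", error),
    }
}
```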

+#[derive(Default, Debug, Clone, PartialEq)]
 pub struct GetCallerIdentityRequest {}

 /// Serialize `GetCallerIdentityRequest` contents to a `SignedRequest`.
@@ -697,7 +743,7 @@ pub struct GetFederationTokenRequest {
     pub name: String,
     /// <p>An IAM policy in JSON format that you want to use as an inline session policy.</p> <p>You must pass an inline or managed session policy to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policies to use as managed session policies.</p> <p>This parameter is optional. However, if you do not pass any session policies, then the resulting federated user session has no permissions. The only exception is when the credentials are used to access a resource that has a resource-based policy that specifically references the federated user session in the Principal element of the policy.</p> <p>When you pass session policies, the session permissions are the intersection of the IAM user policies and the session policies that you pass. This gives you a way to further restrict the permissions for a federated user. You cannot use session policies to grant more permissions than those that are defined in the permissions policy of the IAM user. For more information, see Session Policies in the IAM User Guide.</p> <p>The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII character from the space character to the end of the valid character list (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage return (\u000D) characters.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
     pub policy: Option<String>,
-    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as a managed session policy. The policies must exist in the same account as the IAM user that is requesting federated access.</p> <p>You must pass an inline or managed session policy to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policies to use as managed session policies. The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. You can provide up to 10 managed policy ARNs. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>This parameter is optional. However, if you do not pass any session policies, then the resulting federated user session has no permissions. The only exception is when the credentials are used to access a resource that has a resource-based policy that specifically references the federated user session in the Principal element of the policy.</p> <p>When you pass session policies, the session permissions are the intersection of the IAM user policies and the session policies that you pass. This gives you a way to further restrict the permissions for a federated user. You cannot use session policies to grant more permissions than those that are defined in the permissions policy of the IAM user. For more information, see Session Policies in the IAM User Guide.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
+    /// <p>The Amazon Resource Names (ARNs) of the IAM managed policies that you want to use as a managed session policy. The policies must exist in the same account as the IAM user that is requesting federated access.</p> <p>You must pass an inline or managed session policy to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policies to use as managed session policies. The plain text that you use for both inline and managed session policies shouldn't exceed 2048 characters. You can provide up to 10 managed policy ARNs. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p> <p>This parameter is optional. However, if you do not pass any session policies, then the resulting federated user session has no permissions. The only exception is when the credentials are used to access a resource that has a resource-based policy that specifically references the federated user session in the Principal element of the policy.</p> <p>When you pass session policies, the session permissions are the intersection of the IAM user policies and the session policies that you pass. This gives you a way to further restrict the permissions for a federated user. You cannot use session policies to grant more permissions than those that are defined in the permissions policy of the IAM user. For more information, see Session Policies in the IAM User Guide.</p> <p>The characters in this parameter count towards the 2048 character session policy guideline. However, an AWS conversion compresses the session policies into a packed binary format that has a separate limit. This is the enforced limit. The PackedPolicySize response element indicates by percentage how close the policy is to the upper size limit.</p>
     pub policy_arns: Option<Vec<PolicyDescriptorType>>,
 }
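The `policy_arns` plumbing is easier to see from the caller's side. A sketch under the assumption that the generated request types keep their `Default` derives; the user name and policy ARN are placeholders:

```rust
use rusoto_sts::{GetFederationTokenRequest, PolicyDescriptorType};

fn main() {
    // Up to 10 managed policy ARNs may be attached as session policies; the
    // effective permissions are the intersection with the IAM user's policies.
    let request = GetFederationTokenRequest {
        name: "example-federated-user".to_owned(),
        policy_arns: Some(vec![PolicyDescriptorType {
            arn: Some("arn:aws:iam::123456789012:policy/ExamplePolicy".to_owned()),
        }]),
        ..Default::default()
    };
    println!("{:?}", request);
}
```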

@@ -883,7 +929,7 @@ impl PolicyDescriptorListTypeSerializer {
 /// <p>A reference to the IAM managed policy that is passed as a session policy for a role session or a federated user session.</p>
 #[derive(Default, Debug, Clone, PartialEq)]
 pub struct PolicyDescriptorType {
-    /// <p>The Amazon Resource Name (ARN) of the IAM managed policy to use as a session policy for the role. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p>
+    /// <p>The Amazon Resource Name (ARN) of the IAM managed policy to use as a session policy for the role. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.</p>
     pub arn: Option<String>,
 }
@@ -1261,6 +1307,43 @@ impl Error for DecodeAuthorizationMessageError {
         }
     }
 }
+/// Errors returned by GetAccessKeyInfo
+#[derive(Debug, PartialEq)]
+pub enum GetAccessKeyInfoError {}
+
+impl GetAccessKeyInfoError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetAccessKeyInfoError> {
+        {
+            let reader = EventReader::new(res.body.as_ref());
+            let mut stack = XmlResponse::new(reader.into_iter().peekable());
+            find_start_element(&mut stack);
+            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
+                match &parsed_error.code[..] {
+                    _ => {}
+                }
+            }
+        }
+        RusotoError::Unknown(res)
+    }
+
+    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
+    where
+        T: Peek + Next,
+    {
+        start_element("ErrorResponse", stack)?;
+        XmlErrorDeserializer::deserialize("Error", stack)
+    }
+}
+impl fmt::Display for GetAccessKeyInfoError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetAccessKeyInfoError {
+    fn description(&self) -> &str {
+        match *self {}
+    }
+}
 /// Errors returned by GetCallerIdentity
 #[derive(Debug, PartialEq)]
 pub enum GetCallerIdentityError {}
@@ -1434,7 +1517,13 @@ pub trait Sts {
         input: DecodeAuthorizationMessageRequest,
     ) -> RusotoFuture<DecodeAuthorizationMessageResponse, DecodeAuthorizationMessageError>;
-    /// <p>Returns details about the IAM identity whose credentials are used to call the API.</p>
+    /// <p>Returns the account identifier for the specified access key ID.</p> <p>Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For more information about access keys, see Managing Access Keys for IAM Users in the IAM User Guide.</p> <p>When you pass an access key ID to this operation, it returns the ID of the AWS account to which the keys belong. Access key IDs beginning with AKIA are long-term credentials for an IAM user or the AWS account root user. Access key IDs beginning with ASIA are temporary credentials that are created using STS operations. If the account in the response belongs to you, you can sign in as the root user and review your root user access keys. Then, you can pull a credentials report to learn which IAM user owns the keys. To learn who requested the temporary credentials for an ASIA access key, view the STS events in your CloudTrail logs.</p> <p>This operation does not indicate the state of the access key. The key might be active, inactive, or deleted. Active keys might not have permissions to perform an operation. Providing a deleted access key might return an error that the key doesn't exist.</p>
+    fn get_access_key_info(
+        &self,
+        input: GetAccessKeyInfoRequest,
+    ) -> RusotoFuture<GetAccessKeyInfoResponse, GetAccessKeyInfoError>;
+
+    /// <p>Returns details about the IAM user or role whose credentials are used to call the operation.</p> <p>No permissions are required to perform this operation. If an administrator adds a policy to your IAM user or role that explicitly denies access to the sts:GetCallerIdentity action, you can still perform this operation. Permissions are not required because the same information is returned when an IAM user or role is denied access. To view an example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice.</p>
     fn get_caller_identity(
         &self,
         input: GetCallerIdentityRequest,
@@ -1464,10 +1553,7 @@ impl StsClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> StsClient {
-        StsClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -1481,10 +1567,14 @@ impl StsClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        StsClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> StsClient {
+        StsClient { client, region }
     }
 }
@@ -1521,7 +1611,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -1572,7 +1662,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -1620,7 +1710,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -1668,7 +1758,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
            );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
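The constructor refactor above funnels `new` and `new_with` through a single `new_with_client` entry point, which also lets callers share one `Client` across several service clients. A sketch, assuming `Client` remains cheaply cloneable (it wraps shared state):

```rust
use rusoto_core::{Client, Region};
use rusoto_sts::StsClient;

fn main() {
    // Reuse the lazily initialized shared dispatcher/credentials pair for
    // clients in two regions instead of building the stack twice.
    let shared = Client::shared();
    let _us_east = StsClient::new_with_client(shared.clone(), Region::UsEast1);
    let _eu_west = StsClient::new_with_client(shared, Region::EuWest1);
}
```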

@@ -1687,7 +1777,58 @@ impl Sts for StsClient {
         })
     }

-    /// <p>Returns details about the IAM identity whose credentials are used to call the API.</p>
+    /// <p>Returns the account identifier for the specified access key ID.</p> <p>Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For more information about access keys, see Managing Access Keys for IAM Users in the IAM User Guide.</p> <p>When you pass an access key ID to this operation, it returns the ID of the AWS account to which the keys belong. Access key IDs beginning with AKIA are long-term credentials for an IAM user or the AWS account root user. Access key IDs beginning with ASIA are temporary credentials that are created using STS operations. If the account in the response belongs to you, you can sign in as the root user and review your root user access keys. Then, you can pull a credentials report to learn which IAM user owns the keys. To learn who requested the temporary credentials for an ASIA access key, view the STS events in your CloudTrail logs.</p> <p>This operation does not indicate the state of the access key. The key might be active, inactive, or deleted. Active keys might not have permissions to perform an operation. Providing a deleted access key might return an error that the key doesn't exist.</p>
+    fn get_access_key_info(
+        &self,
+        input: GetAccessKeyInfoRequest,
+    ) -> RusotoFuture<GetAccessKeyInfoResponse, GetAccessKeyInfoError> {
+        let mut request = SignedRequest::new("POST", "sts", &self.region, "/");
+        let mut params = Params::new();
+
+        params.put("Action", "GetAccessKeyInfo");
+        params.put("Version", "2011-06-15");
+        GetAccessKeyInfoRequestSerializer::serialize(&mut params, "", &input);
+        request.set_payload(Some(serde_urlencoded::to_string(&params).unwrap()));
+        request.set_content_type("application/x-www-form-urlencoded".to_owned());
+
+        self.client.sign_and_dispatch(request, |response| {
+            if !response.status.is_success() {
+                return Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(GetAccessKeyInfoError::from_response(response))),
+                );
+            }
+
+            Box::new(response.buffer().from_err().and_then(move |response| {
+                let result;
+
+                if response.body.is_empty() {
+                    result = GetAccessKeyInfoResponse::default();
+                } else {
+                    let reader = EventReader::new_with_config(
+                        response.body.as_ref(),
+                        ParserConfig::new().trim_whitespace(false),
+                    );
+                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
+                    let _start_document = stack.next();
+                    let actual_tag_name = peek_at_name(&mut stack)?;
+                    start_element(&actual_tag_name, &mut stack)?;
+                    result = GetAccessKeyInfoResponseDeserializer::deserialize(
+                        "GetAccessKeyInfoResult",
+                        &mut stack,
+                    )?;
+                    skip_tree(&mut stack);
+                    end_element(&actual_tag_name, &mut stack)?;
+                }
+                // parse non-payload
+                Ok(result)
+            }))
+        })
+    }
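Every response path in this file now builds its reader with `trim_whitespace(false)`. A standalone xml-rs sketch of the behavioral difference, which is presumably the motivation for the flip: padded text nodes survive parsing intact, so string fields round-trip exactly as the service sent them.

```rust
use xml::reader::{EventReader, ParserConfig, XmlEvent};

fn main() {
    let body = r#"<Message>  padded  </Message>"#;
    let reader = EventReader::new_with_config(
        body.as_bytes(),
        ParserConfig::new().trim_whitespace(false),
    );
    for event in reader {
        if let Ok(XmlEvent::Characters(text)) = event {
            // Prints "  padded  "; with trim_whitespace(true) it would
            // have been trimmed to "padded".
            println!("{:?}", text);
        }
    }
}
```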

+
+    /// <p>Returns details about the IAM user or role whose credentials are used to call the operation.</p> <p>No permissions are required to perform this operation. If an administrator adds a policy to your IAM user or role that explicitly denies access to the sts:GetCallerIdentity action, you can still perform this operation. Permissions are not required because the same information is returned when an IAM user or role is denied access. To view an example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice.</p>
     fn get_caller_identity(
         &self,
         input: GetCallerIdentityRequest,
@@ -1719,7 +1860,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -1770,7 +1911,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -1821,7 +1962,7 @@ impl Sts for StsClient {
         } else {
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true),
+                ParserConfig::new().trim_whitespace(false),
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
diff --git a/rusoto/services/support/Cargo.toml b/rusoto/services/support/Cargo.toml
index 72c8ce95911..4f895f31f91 100644
--- a/rusoto/services/support/Cargo.toml
+++ b/rusoto/services/support/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_support"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/support/README.md b/rusoto/services/support/README.md
index bc13e2c97e9..3fafb970dbe 100644
--- a/rusoto/services/support/README.md
+++ b/rusoto/services/support/README.md
@@ -23,9 +23,16 @@ To use `rusoto_support` in your application, add it as a dependency in your `Cargo.toml`:

 ```toml
 [dependencies]
-rusoto_support = "0.40.0"
+rusoto_support = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/support/src/custom/mod.rs b/rusoto/services/support/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/support/src/custom/mod.rs
+++ b/rusoto/services/support/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/support/src/generated.rs b/rusoto/services/support/src/generated.rs
index 520f5eb58f4..6bef7747e93 100644
--- a/rusoto/services/support/src/generated.rs
+++ b/rusoto/services/support/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -38,7 +37,7 @@ pub struct AddAttachmentsToSetRequest {
 /// <p>The ID and expiry time of the attachment set returned by the AddAttachmentsToSet operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddAttachmentsToSetResponse {
     /// <p>The ID of the attachment set. If an attachmentSetId was not specified, a new attachment set is created, and the ID of the set is returned in the response. If an attachmentSetId was specified, the attachments are added to the specified set, if it exists.</p>
     #[serde(rename = "attachmentSetId")]
@@ -72,7 +71,7 @@ pub struct AddCommunicationToCaseRequest {
 /// <p>The result of the AddCommunicationToCase operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AddCommunicationToCaseResponse {
     /// <p>True if AddCommunicationToCase succeeds. Otherwise, returns an error.</p>
     #[serde(rename = "result")]
@@ -100,7 +99,7 @@ pub struct Attachment {
 /// <p>The file name and ID of an attachment to a case communication. You can use the ID to retrieve the attachment with the DescribeAttachment operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AttachmentDetails {
     /// <p>The ID of the attachment.</p>
     #[serde(rename = "attachmentId")]
@@ -114,7 +113,7 @@ pub struct AttachmentDetails {
 /// <p>A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:</p> <ul> <li><p>caseId. The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.</p></li> <li><p>categoryCode. The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.</p></li> <li><p>displayId. The identifier for the case on pages in the AWS Support Center.</p></li> <li><p>language. The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ("en") and Japanese ("ja"). Language parameters must be passed explicitly for operations that take them.</p></li> <li><p>recentCommunications. One or more Communication objects. Fields of these objects are attachments, body, caseId, submittedBy, and timeCreated.</p></li> <li><p>nextToken. A resumption point for pagination.</p></li> <li><p>serviceCode. The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices.</p></li> <li><p>severityCode. The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels.</p></li> <li><p>status. The status of the case in the AWS Support Center.</p></li> <li><p>subject. The subject line of the case.</p></li> <li><p>submittedBy. The email address of the account that submitted the case.</p></li> <li><p>timeCreated. The time the case was created, in ISO-8601 format.</p></li> </ul>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CaseDetails {
     /// <p>The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47</p>
     #[serde(rename = "caseId")]
@@ -168,7 +167,7 @@ pub struct CaseDetails {
 /// <p>A JSON-formatted name/value pair that represents the category name and category code of the problem, selected from the DescribeServices response for each AWS service.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Category {
     /// <p>The category code for the support case.</p>
     #[serde(rename = "code")]
@@ -182,7 +181,7 @@ pub struct Category {
 /// <p>A communication associated with an AWS Support case. The communication consists of the case ID, the message body, attachment information, the account email address, and the date and time of the communication.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Communication {
     /// <p>Information about the attachments to the case communication.</p>
     #[serde(rename = "attachmentSet")]
@@ -247,7 +246,7 @@ pub struct CreateCaseRequest {
 /// <p>The AWS Support case ID returned by a successful completion of the CreateCase operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateCaseResponse {
     /// <p>The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47</p>
     #[serde(rename = "caseId")]
@@ -264,7 +263,7 @@ pub struct DescribeAttachmentRequest {
 /// <p>The content and file name of the attachment returned by the DescribeAttachment operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeAttachmentResponse {
     /// <p>The attachment content and file name.</p>
     #[serde(rename = "attachment")]
@@ -315,7 +314,7 @@ pub struct DescribeCasesRequest {
 /// <p>Returns an array of CaseDetails objects and a nextToken that defines a point for pagination in the result set.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeCasesResponse {
     /// <p>The details for the cases that match the request.</p>
     #[serde(rename = "cases")]
@@ -353,7 +352,7 @@ pub struct DescribeCommunicationsRequest {
 /// <p>The communications returned by the DescribeCommunications operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeCommunicationsResponse {
     /// <p>The communications for the case.</p>
     #[serde(rename = "communications")]
@@ -380,7 +379,7 @@ pub struct DescribeServicesRequest {
 /// <p>The list of AWS services returned by the DescribeServices operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeServicesResponse {
     /// <p>A JSON-formatted list of AWS services.</p>
     #[serde(rename = "services")]
@@ -399,7 +398,7 @@ pub struct DescribeSeverityLevelsRequest {
 /// <p>The list of severity levels returned by the DescribeSeverityLevels operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeSeverityLevelsResponse {
     /// <p>The available severity levels for the support case. Available severity levels are defined by your service level agreement with AWS.</p>
     #[serde(rename = "severityLevels")]
@@ -417,7 +416,7 @@ pub struct DescribeTrustedAdvisorCheckRefreshStatusesRequest {
 /// <p>The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTrustedAdvisorCheckRefreshStatusesResponse {
     /// <p>The refresh status of the specified Trusted Advisor checks.</p>
     #[serde(rename = "statuses")]
@@ -438,7 +437,7 @@ pub struct DescribeTrustedAdvisorCheckResultRequest {
 /// <p>The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTrustedAdvisorCheckResultResponse {
     /// <p>The detailed results of the Trusted Advisor check.</p>
     #[serde(rename = "result")]
@@ -456,7 +455,7 @@ pub struct DescribeTrustedAdvisorCheckSummariesRequest {
 /// <p>The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTrustedAdvisorCheckSummariesResponse {
     /// <p>The summary information for the requested Trusted Advisor checks.</p>
     #[serde(rename = "summaries")]
@@ -473,7 +472,7 @@ pub struct DescribeTrustedAdvisorChecksRequest {
 /// <p>Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct DescribeTrustedAdvisorChecksResponse {
     /// <p>Information about all available Trusted Advisor checks.</p>
     #[serde(rename = "checks")]
@@ -482,7 +481,7 @@ pub struct DescribeTrustedAdvisorChecksResponse {
 /// <p>The five most recent communications associated with the case.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RecentCaseCommunications {
     /// <p>The five most recent communications associated with the case.</p>
     #[serde(rename = "communications")]
@@ -504,7 +503,7 @@ pub struct RefreshTrustedAdvisorCheckRequest {
 /// <p>The current refresh status of a Trusted Advisor check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RefreshTrustedAdvisorCheckResponse {
     /// <p>The current refresh status for a check, including the amount of time until the check is eligible for refresh.</p>
     #[serde(rename = "status")]
@@ -522,7 +521,7 @@ pub struct ResolveCaseRequest {
 /// <p>The status of the case returned by the ResolveCase operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ResolveCaseResponse {
     /// <p>The status of the case after the ResolveCase request was processed.</p>
     #[serde(rename = "finalCaseStatus")]
@@ -536,7 +535,7 @@ pub struct ResolveCaseResponse {
 /// <p>Information about an AWS service returned by the DescribeServices operation.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Service {
     /// <p>A list of categories that describe the type of support issue a case describes. Categories consist of a category name and a category code. Category names and codes are passed to AWS Support when you call CreateCase.</p>
     #[serde(rename = "categories")]
@@ -554,7 +553,7 @@ pub struct Service {
 /// <p>A code and name pair that represent a severity level that can be applied to a support case.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SeverityLevel {
     /// <p>One of four values: "low," "medium," "high," and "urgent". These values correspond to response times returned to the caller in severityLevel.name.</p>
     #[serde(rename = "code")]
@@ -568,7 +567,7 @@ pub struct SeverityLevel {
 /// <p>The container for summary information that relates to the category of the Trusted Advisor check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCategorySpecificSummary {
     /// <p>The summary information about cost savings for a Trusted Advisor check that is in the Cost Optimizing category.</p>
     #[serde(rename = "costOptimizing")]
@@ -578,7 +577,7 @@ pub struct TrustedAdvisorCategorySpecificSummary {
 /// <p>The description and metadata for a Trusted Advisor check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCheckDescription {
     /// <p>The category of the Trusted Advisor check.</p>
     #[serde(rename = "category")]
@@ -599,7 +598,7 @@ pub struct TrustedAdvisorCheckDescription {
 /// <p>The refresh status of a Trusted Advisor check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCheckRefreshStatus {
     /// <p>The unique identifier for the Trusted Advisor check.</p>
     #[serde(rename = "checkId")]
@@ -614,7 +613,7 @@ pub struct TrustedAdvisorCheckRefreshStatus {
 /// <p>The results of a Trusted Advisor check returned by DescribeTrustedAdvisorCheckResult.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCheckResult {
     /// <p>Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.</p>
     #[serde(rename = "categorySpecificSummary")]
@@ -637,7 +636,7 @@ pub struct TrustedAdvisorCheckResult {
 /// <p>A summary of a Trusted Advisor check result, including the alert status, last refresh, and number of resources examined.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCheckSummary {
     /// <p>Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.</p>
     #[serde(rename = "categorySpecificSummary")]
@@ -661,7 +660,7 @@ pub struct TrustedAdvisorCheckSummary {
 /// <p>The estimated cost savings that might be realized if the recommended actions are taken.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorCostOptimizingSummary {
     /// <p>The estimated monthly savings that might be realized if the recommended actions are taken.</p>
     #[serde(rename = "estimatedMonthlySavings")]
@@ -673,7 +672,7 @@ pub struct TrustedAdvisorCostOptimizingSummary {
 /// <p>Contains information about a resource identified by a Trusted Advisor check.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorResourceDetail {
     /// <p>Specifies whether the AWS resource was ignored by Trusted Advisor because it was marked as suppressed by the user.</p>
     #[serde(rename = "isSuppressed")]
@@ -696,7 +695,7 @@ pub struct TrustedAdvisorResourceDetail {
 /// <p>Details about AWS resources that were analyzed in a call to Trusted Advisor DescribeTrustedAdvisorCheckSummaries.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TrustedAdvisorResourcesSummary {
     /// <p>The number of AWS resources that were flagged (listed) by the Trusted Advisor check.</p>
     #[serde(rename = "resourcesFlagged")]
@@ -1429,10 +1428,7 @@ impl AWSSupportClient {
     ///
     /// The client will use the default credentials provider and tls client.
     pub fn new(region: region::Region) -> AWSSupportClient {
-        AWSSupportClient {
-            client: Client::shared(),
-            region,
-        }
+        Self::new_with_client(Client::shared(), region)
     }

     pub fn new_with(
@@ -1446,10 +1442,14 @@ impl AWSSupportClient {
         D: DispatchSignedRequest + Send + Sync + 'static,
         D::Future: Send,
     {
-        AWSSupportClient {
-            client: Client::new_with(credentials_provider, request_dispatcher),
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
             region,
-        }
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> AWSSupportClient {
+        AWSSupportClient { client, region }
     }
 }
diff --git a/rusoto/services/swf/Cargo.toml b/rusoto/services/swf/Cargo.toml
index 91145010573..1c6c7ae100d 100644
--- a/rusoto/services/swf/Cargo.toml
+++ b/rusoto/services/swf/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_swf"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"

 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false

 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false

 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/swf/README.md b/rusoto/services/swf/README.md
index 6490953bc4f..719579f2d7b 100644
--- a/rusoto/services/swf/README.md
+++ b/rusoto/services/swf/README.md
@@ -23,9 +23,16 @@ To use `rusoto_swf` in your application, add it as a dependency in your `Cargo.toml`:

 ```toml
 [dependencies]
-rusoto_swf = "0.40.0"
+rusoto_swf = "0.41.0"
 ```

+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing

 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/swf/src/custom/mod.rs b/rusoto/services/swf/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/swf/src/custom/mod.rs
+++ b/rusoto/services/swf/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/swf/src/generated.rs b/rusoto/services/swf/src/generated.rs
index bd9ebe2438c..8b1fe1a6fa9 100644
--- a/rusoto/services/swf/src/generated.rs
+++ b/rusoto/services/swf/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]

-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;

 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
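The `serialize_structs` feature declared in these manifests is opt-in. A hypothetical consumer sketch: enable the feature in Cargo.toml, and response/output types such as `ActivityTask` gain `Serialize`, so they can be round-tripped with serde_json (for caching, logging, and so on).

```rust
// Consumer Cargo.toml (illustrative):
//   rusoto_swf = { version = "0.41.0", features = ["serialize_structs"] }
//   serde_json = "1.0"

use rusoto_swf::ActivityTask;

fn main() {
    let task = ActivityTask::default();
    // Compiles only with the feature enabled, since Serialize is gated
    // behind cfg_attr(any(test, feature = "serialize_structs"), ...).
    let json = serde_json::to_string(&task).expect("task serializes");
    println!("{}", json);
}
```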

 /// <p>Unit of work sent to an activity worker.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTask {
     /// <p>The unique ID of the task.</p>
     #[serde(rename = "activityId")]
@@ -51,7 +50,7 @@ pub struct ActivityTask {
 /// <p>Provides the details of the ActivityTaskCancelRequested event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskCancelRequestedEventAttributes {
     /// <p>The unique ID of the task.</p>
     #[serde(rename = "activityId")]
@@ -63,7 +62,7 @@ pub struct ActivityTaskCancelRequestedEventAttributes {
 /// <p>Provides the details of the ActivityTaskCanceled event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskCanceledEventAttributes {
     /// <p>Details of the cancellation.</p>
     #[serde(rename = "details")]
@@ -83,7 +82,7 @@ pub struct ActivityTaskCanceledEventAttributes {
 /// <p>Provides the details of the ActivityTaskCompleted event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskCompletedEventAttributes {
     /// <p>The results of the activity task.</p>
     #[serde(rename = "result")]
@@ -99,7 +98,7 @@ pub struct ActivityTaskCompletedEventAttributes {
 /// <p>Provides the details of the ActivityTaskFailed event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskFailedEventAttributes {
     /// <p>The details of the failure.</p>
     #[serde(rename = "details")]
@@ -119,7 +118,7 @@ pub struct ActivityTaskFailedEventAttributes {
 /// <p>Provides the details of the ActivityTaskScheduled event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskScheduledEventAttributes {
     /// <p>The unique ID of the activity task.</p>
     #[serde(rename = "activityId")]
@@ -157,7 +156,7 @@ pub struct ActivityTaskScheduledEventAttributes {
     /// <p>The task list in which the activity task has been scheduled.</p>
     #[serde(rename = "taskList")]
     pub task_list: TaskList,
-    /// <p>The priority to assign to the scheduled activity task. If set, this overrides any default priority value that was assigned when the activity type was registered.</p> <p>Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.</p>
+    /// <p>The priority to assign to the scheduled activity task. If set, this overrides any default priority value that was assigned when the activity type was registered.</p> <p>Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.</p>
     #[serde(rename = "taskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_priority: Option<String>,
@@ -165,7 +164,7 @@ pub struct ActivityTaskScheduledEventAttributes {
 /// <p>Provides the details of the ActivityTaskStarted event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskStartedEventAttributes {
     /// <p>Identity of the worker that was assigned this task. This aids diagnostics when problems arise. The form of this identity is user defined.</p>
     #[serde(rename = "identity")]
@@ -178,7 +177,7 @@ pub struct ActivityTaskStartedEventAttributes {
 /// <p>Status information about an activity task.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskStatus {
     /// <p>Set to true if cancellation of the task is requested.</p>
     #[serde(rename = "cancelRequested")]
@@ -187,7 +186,7 @@ pub struct ActivityTaskStatus {
 /// <p>Provides the details of the ActivityTaskTimedOut event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTaskTimedOutEventAttributes {
     /// <p>Contains the content of the details parameter for the last call made by the activity to RecordActivityTaskHeartbeat.</p>
     #[serde(rename = "details")]
@@ -217,7 +216,7 @@ pub struct ActivityType {
 /// <p>Configuration settings registered with the activity type.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTypeConfiguration {
     /// <p>The default maximum time, in seconds, before which a worker processing a task must report progress by calling RecordActivityTaskHeartbeat.</p> <p>You can specify this value only when registering an activity type. The registered default value can be overridden when you schedule a task through the ScheduleActivityTask Decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.</p> <p>The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.</p>
     #[serde(rename = "defaultTaskHeartbeatTimeout")]
@@ -227,7 +226,7 @@ pub struct ActivityTypeConfiguration {
     #[serde(rename = "defaultTaskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_list: Option<TaskList>,
-    /// <p>The default task priority for tasks of this activity type, specified at registration. If not set, then 0 is used as the default priority. This default can be overridden when scheduling an activity task.</p> <p>Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.</p>
+    /// <p>The default task priority for tasks of this activity type, specified at registration. If not set, then 0 is used as the default priority. This default can be overridden when scheduling an activity task.</p> <p>Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.</p>
     #[serde(rename = "defaultTaskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_priority: Option<String>,
@@ -247,7 +246,7 @@ pub struct ActivityTypeConfiguration {
 /// <p>Detailed information about an activity type.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTypeDetail {
     /// <p>The configuration settings registered with the activity type.</p>
     #[serde(rename = "configuration")]
@@ -259,7 +258,7 @@ pub struct ActivityTypeDetail {
 /// <p>Detailed information about an activity type.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTypeInfo {
     /// <p>The ActivityType type structure representing the activity type.</p>
     #[serde(rename = "activityType")]
@@ -282,7 +281,7 @@ pub struct ActivityTypeInfo {
 /// <p>Contains a paginated list of activity type information structures.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ActivityTypeInfos {
     /// <p>If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.</p> <p>The configured maximumPageSize determines how many results can be returned in a single call.</p>
     #[serde(rename = "nextPageToken")]
@@ -293,7 +292,7 @@ pub struct ActivityTypeInfos {
     pub type_infos: Vec<ActivityTypeInfo>,
 }
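The nextPageToken contract documented on `ActivityTypeInfos` is the usual SWF pagination loop. A sketch for context, assuming the crate's existing ListActivityTypes operation and a `Default` derive on the request; the domain name is a placeholder:

```rust
use rusoto_core::Region;
use rusoto_swf::{ListActivityTypesRequest, Swf, SwfClient};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let mut token: Option<String> = None;
    loop {
        let page = client
            .list_activity_types(ListActivityTypesRequest {
                domain: "example-domain".to_owned(),
                registration_status: "REGISTERED".to_owned(),
                next_page_token: token.take(),
                ..Default::default()
            })
            .sync()
            .expect("ListActivityTypes failed");
        for info in &page.type_infos {
            println!("{:?}", info.activity_type);
        }
        match page.next_page_token {
            Some(next) => token = Some(next),
            None => break, // no more pages
        }
    }
}
```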

-/// <p>Provides the details of the CancelTimer decision.</p> <p>Access Control</p> <p>You can use IAM policies to control this decision's access to Amazon SWF resources as follows:</p> <ul> <li>Use a Resource element with the domain name to limit the action to only specified domains.</li> <li>Use an Action element to allow or deny permission to call this action.</li> <li>You cannot use an IAM policy to constrain this action's parameters.</li> </ul> <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.</p>
+/// <p>Provides the details of the CancelTimer decision.</p> <p>Access Control</p> <p>You can use IAM policies to control this decision's access to Amazon SWF resources as follows:</p> <ul> <li>Use a Resource element with the domain name to limit the action to only specified domains.</li> <li>Use an Action element to allow or deny permission to call this action.</li> <li>You cannot use an IAM policy to constrain this action's parameters.</li> </ul> <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.</p>
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct CancelTimerDecisionAttributes {
     /// <p>The unique ID of the timer to cancel.</p>
@@ -303,9 +302,9 @@ pub struct CancelTimerDecisionAttributes {
 /// <p>Provides the details of the CancelTimerFailed event.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CancelTimerFailedEventAttributes {
-    /// <p>The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.</p> <p>If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.</p>
+    /// <p>The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.</p> <p>If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.</p>
     #[serde(rename = "cause")]
     pub cause: String,
     /// <p>The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.</p>
@@ -316,7 +315,7 @@ pub struct CancelTimerFailedEventAttributes {
     pub timer_id: String,
 }
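The Access Control notes repeated on these decision attributes all describe the same pattern: scope the action by domain, and expect cause = OPERATION_NOT_PERMITTED when a policy denies it. An illustrative policy document of the kind those notes reference, embedded in Rust only to keep the snippet self-contained; the account ID and domain are placeholders:

```rust
fn main() {
    // Allow decision responses only against one SWF domain.
    let policy = r#"{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Action": "swf:RespondDecisionTaskCompleted",
        "Resource": "arn:aws:swf:us-east-1:123456789012:/domain/example-domain"
      }]
    }"#;
    println!("{}", policy);
}
```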

Provides the details of the CancelWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+///

Provides the details of the CancelWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CancelWorkflowExecutionDecisionAttributes { ///

Details of the cancellation.

@@ -327,9 +326,9 @@ pub struct CancelWorkflowExecutionDecisionAttributes { ///

Provides the details of the CancelWorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CancelWorkflowExecutionFailedEventAttributes { - ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATIONNOTPERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[serde(rename = "cause")] pub cause: String, ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
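The recurring attribute change in these hunks swaps `#[cfg_attr(test, derive(Serialize))]` for a gate that also honors an opt-in Cargo feature. A minimal sketch of the pattern, with a hypothetical struct standing in for the generated ones:

```rust
use serde::{Deserialize, Serialize};

// Serialize is derived only for `cargo test` builds or when the consumer
// enables the `serialize_structs` feature; ordinary builds of the crate
// stay Deserialize-only, which keeps compile times and code size down.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ExampleEventAttributes {
    #[serde(rename = "cause")]
    pub cause: String,
}
```

A consumer would presumably opt in with something like `features = ["serialize_structs"]` on the dependency in Cargo.toml; the feature name is taken from the diff, the rest of the manifest line is assumed.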

@@ -339,7 +338,7 @@ pub struct CancelWorkflowExecutionFailedEventAttributes { ///

Provides the details of the ChildWorkflowExecutionCanceled event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionCanceledEventAttributes { ///

Details of the cancellation (if provided).

#[serde(rename = "details")] @@ -361,7 +360,7 @@ pub struct ChildWorkflowExecutionCanceledEventAttributes { ///

Provides the details of the ChildWorkflowExecutionCompleted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionCompletedEventAttributes { ///

The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution Decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -383,7 +382,7 @@ pub struct ChildWorkflowExecutionCompletedEventAttributes { ///

Provides the details of the ChildWorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionFailedEventAttributes { ///

The details of the failure (if provided).

#[serde(rename = "details")] @@ -409,7 +408,7 @@ pub struct ChildWorkflowExecutionFailedEventAttributes { ///

Provides the details of the ChildWorkflowExecutionStarted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionStartedEventAttributes { ///

The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution Decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -424,7 +423,7 @@ pub struct ChildWorkflowExecutionStartedEventAttributes { ///

Provides the details of the ChildWorkflowExecutionTerminated event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionTerminatedEventAttributes { ///

The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution Decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -442,7 +441,7 @@ pub struct ChildWorkflowExecutionTerminatedEventAttributes { ///

Provides the details of the ChildWorkflowExecutionTimedOut event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ChildWorkflowExecutionTimedOutEventAttributes { ///

The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution Decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -469,7 +468,7 @@ pub struct CloseStatusFilter { pub status: String, } -///

Provides the details of the CompleteWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+///

Provides the details of the CompleteWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CompleteWorkflowExecutionDecisionAttributes { ///

The result of the workflow execution. The form of the result is implementation defined.

@@ -480,9 +479,9 @@ pub struct CompleteWorkflowExecutionDecisionAttributes { ///

Provides the details of the CompleteWorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CompleteWorkflowExecutionFailedEventAttributes { - ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[serde(rename = "cause")] pub cause: String, ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

@@ -490,7 +489,7 @@ pub struct CompleteWorkflowExecutionFailedEventAttributes { pub decision_task_completed_event_id: i64, } -///

Provides the details of the ContinueAsNewWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tag – A tag used to identify the workflow execution

    • taskList – String constraint. The key is swf:taskList.name.

    • workflowType.version – String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+///

Provides the details of the ContinueAsNewWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tag – A tag used to identify the workflow execution

    • taskList – String constraint. The key is swf:taskList.name.

    • workflowType.version – String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ContinueAsNewWorkflowExecutionDecisionAttributes { ///

If set, specifies the policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.
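As a concrete illustration of these knobs, a hedged sketch of building the decision attributes with an explicit child policy and task priority; the field names and the Default derive are taken from the surrounding hunks, and the values are quoted from the docs:

```rust
fn continue_as_new_attrs() -> ContinueAsNewWorkflowExecutionDecisionAttributes {
    ContinueAsNewWorkflowExecutionDecisionAttributes {
        // REQUEST_CANCEL is one of the three documented child policies.
        child_policy: Some("REQUEST_CANCEL".to_string()),
        // Priorities span Java's Integer range; higher numbers run first.
        task_priority: Some("10".to_string()),
        ..Default::default()
    }
}
```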

@@ -517,7 +516,7 @@ pub struct ContinueAsNewWorkflowExecutionDecisionAttributes { #[serde(rename = "taskList")] #[serde(skip_serializing_if = "Option::is_none")] pub task_list: Option<TaskList>, - ///

The task priority that, if set, specifies the priority for the decision tasks for this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

+ ///

The task priority that, if set, specifies the priority for the decision tasks for this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

#[serde(rename = "taskPriority")] #[serde(skip_serializing_if = "Option::is_none")] pub task_priority: Option, @@ -533,9 +532,9 @@ pub struct ContinueAsNewWorkflowExecutionDecisionAttributes { ///

Provides the details of the ContinueAsNewWorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ContinueAsNewWorkflowExecutionFailedEventAttributes { - ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[serde(rename = "cause")] pub cause: String, ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the ContinueAsNewWorkflowExecution decision that started this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

@@ -616,7 +615,7 @@ pub struct CountPendingDecisionTasksInput { pub task_list: TaskList, } -///

Specifies a decision made by the decider. A decision can be one of these types:

  • CancelTimer – Cancels a previously started timer and records a TimerCanceled event in the history.

  • CancelWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionCanceled event in the history.

  • CompleteWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionCompleted event in the history .

  • ContinueAsNewWorkflowExecution – Closes the workflow execution and starts a new workflow execution of the same type using the same workflow ID and a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded in the history.

  • FailWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionFailed event in the history.

  • RecordMarker – Records a MarkerRecorded event in the history. Markers can be used for adding custom information in the history for instance to let deciders know that they don't need to look at the history beyond the marker event.

  • RequestCancelActivityTask – Attempts to cancel a previously scheduled activity task. If the activity task was scheduled but has not been assigned to a worker, then it is canceled. If the activity task was already assigned to a worker, then the worker is informed that cancellation has been requested in the response to RecordActivityTaskHeartbeat.

  • RequestCancelExternalWorkflowExecution – Requests that a request be made to cancel the specified external workflow execution and records a RequestCancelExternalWorkflowExecutionInitiated event in the history.

  • ScheduleActivityTask – Schedules an activity task.

  • SignalExternalWorkflowExecution – Requests a signal to be delivered to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated event in the history.

  • StartChildWorkflowExecution – Requests that a child workflow execution be started and records a StartChildWorkflowExecutionInitiated event in the history. The child workflow execution is a separate workflow execution with its own history.

  • StartTimer – Starts a timer for this workflow execution and records a TimerStarted event in the history. This timer fires after the specified delay and record a TimerFired event.

Access Control

If you grant permission to use RespondDecisionTaskCompleted, you can use IAM policies to express permissions for the list of decisions returned by this action as if they were members of the API. Treating decisions as a pseudo API maintains a uniform conceptual model and helps keep policies readable. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

Decision Failure

Decisions can fail for several reasons

  • The ordering of decisions should follow a logical flow. Some decisions might not make sense in the current context of the workflow execution and therefore fails.

  • A limit on your account was reached.

  • The decision lacks sufficient permissions.

One of the following events might be added to the history to indicate an error. The event attribute's cause parameter indicates the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

  • ScheduleActivityTaskFailed – A ScheduleActivityTask decision failed. This could happen if the activity type specified in the decision isn't registered, is in a deprecated state, or the decision isn't properly configured.

  • RequestCancelActivityTaskFailed – A RequestCancelActivityTask decision failed. This could happen if there is no open activity task with the specified activityId.

  • StartTimerFailed – A StartTimer decision failed. This could happen if there is another open timer with the same timerId.

  • CancelTimerFailed – A CancelTimer decision failed. This could happen if there is no open timer with the specified timerId.

  • StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution decision failed. This could happen if the workflow type specified isn't registered, is deprecated, or the decision isn't properly configured.

  • SignalExternalWorkflowExecutionFailed – A SignalExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.

  • RequestCancelExternalWorkflowExecutionFailed – A RequestCancelExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.

  • CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

  • CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

  • ContinueAsNewWorkflowExecutionFailed – A ContinueAsNewWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution decision was not configured correctly.

  • FailWorkflowExecutionFailed – A FailWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

The preceding error events might occur due to an error in the decider logic, which might put the workflow execution in an unstable state The cause field in the event structure for the error event indicates the cause of the error.

A workflow execution may be closed by the decider by returning one of the following decisions when completing a decision task: CompleteWorkflowExecution, FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution. An UnhandledDecision fault is returned if a workflow closing decision is specified and a signal or activity event had been added to the history while the decision task was being performed by the decider. Unlike the above situations which are logic issues, this fault is always possible because of race conditions in a distributed system. The right action here is to call RespondDecisionTaskCompleted without any decisions. This would result in another decision task with these new events included in the history. The decider should handle the new events and may decide to close the workflow execution.

How to Code a Decision

You code a decision by first setting the decision type field to one of the above decision values, and then set the corresponding attributes field shown below:

+///

Specifies a decision made by the decider. A decision can be one of these types:

  • CancelTimer – Cancels a previously started timer and records a TimerCanceled event in the history.

  • CancelWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionCanceled event in the history.

  • CompleteWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionCompleted event in the history.

  • ContinueAsNewWorkflowExecution – Closes the workflow execution and starts a new workflow execution of the same type using the same workflow ID and a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded in the history.

  • FailWorkflowExecution – Closes the workflow execution and records a WorkflowExecutionFailed event in the history.

  • RecordMarker – Records a MarkerRecorded event in the history. Markers can be used for adding custom information in the history for instance to let deciders know that they don't need to look at the history beyond the marker event.

  • RequestCancelActivityTask – Attempts to cancel a previously scheduled activity task. If the activity task was scheduled but has not been assigned to a worker, then it is canceled. If the activity task was already assigned to a worker, then the worker is informed that cancellation has been requested in the response to RecordActivityTaskHeartbeat.

  • RequestCancelExternalWorkflowExecution – Requests that a request be made to cancel the specified external workflow execution and records a RequestCancelExternalWorkflowExecutionInitiated event in the history.

  • ScheduleActivityTask – Schedules an activity task.

  • SignalExternalWorkflowExecution – Requests a signal to be delivered to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated event in the history.

  • StartChildWorkflowExecution – Requests that a child workflow execution be started and records a StartChildWorkflowExecutionInitiated event in the history. The child workflow execution is a separate workflow execution with its own history.

  • StartTimer – Starts a timer for this workflow execution and records a TimerStarted event in the history. This timer fires after the specified delay and records a TimerFired event.

Access Control

If you grant permission to use RespondDecisionTaskCompleted, you can use IAM policies to express permissions for the list of decisions returned by this action as if they were members of the API. Treating decisions as a pseudo API maintains a uniform conceptual model and helps keep policies readable. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

Decision Failure

Decisions can fail for several reasons

  • The ordering of decisions should follow a logical flow. Some decisions might not make sense in the current context of the workflow execution and therefore fail.

  • A limit on your account was reached.

  • The decision lacks sufficient permissions.

One of the following events might be added to the history to indicate an error. The event attribute's cause parameter indicates the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

  • ScheduleActivityTaskFailed – A ScheduleActivityTask decision failed. This could happen if the activity type specified in the decision isn't registered, is in a deprecated state, or the decision isn't properly configured.

  • RequestCancelActivityTaskFailed – A RequestCancelActivityTask decision failed. This could happen if there is no open activity task with the specified activityId.

  • StartTimerFailed – A StartTimer decision failed. This could happen if there is another open timer with the same timerId.

  • CancelTimerFailed – A CancelTimer decision failed. This could happen if there is no open timer with the specified timerId.

  • StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution decision failed. This could happen if the workflow type specified isn't registered, is deprecated, or the decision isn't properly configured.

  • SignalExternalWorkflowExecutionFailed – A SignalExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.

  • RequestCancelExternalWorkflowExecutionFailed – A RequestCancelExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.

  • CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

  • CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

  • ContinueAsNewWorkflowExecutionFailed – A ContinueAsNewWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution decision was not configured correctly.

  • FailWorkflowExecutionFailed – A FailWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

The preceding error events might occur due to an error in the decider logic, which might put the workflow execution in an unstable state. The cause field in the event structure for the error event indicates the cause of the error.

A workflow execution may be closed by the decider by returning one of the following decisions when completing a decision task: CompleteWorkflowExecution, FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution. An UnhandledDecision fault is returned if a workflow closing decision is specified and a signal or activity event had been added to the history while the decision task was being performed by the decider. Unlike the above situations which are logic issues, this fault is always possible because of race conditions in a distributed system. The right action here is to call RespondDecisionTaskCompleted without any decisions. This would result in another decision task with these new events included in the history. The decider should handle the new events and may decide to close the workflow execution.

How to Code a Decision

You code a decision by first setting the decision type field to one of the above decision values, and then setting the corresponding attributes field shown below:
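A hedged sketch of that recipe, placed here before the struct definition that follows: set decision_type, then fill in the matching attributes member. The snake_case field names are assumed from the crate's conventions for the members listed below:

```rust
// Sketch under stated assumptions: `Decision` and
// `CompleteWorkflowExecutionDecisionAttributes` derive Default, as the
// struct definitions in this file indicate.
fn complete_decision(result: &str) -> Decision {
    Decision {
        decision_type: "CompleteWorkflowExecution".to_string(),
        complete_workflow_execution_decision_attributes: Some(
            CompleteWorkflowExecutionDecisionAttributes {
                result: Some(result.to_string()),
            },
        ),
        // All other per-type attribute fields stay None.
        ..Default::default()
    }
}
```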

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct Decision { ///

Provides the details of the CancelTimer decision. It isn't set for other decision types.

@@ -687,7 +686,7 @@ pub struct Decision { ///

A structure that represents a decision task. Decision tasks are sent to deciders in order for them to make decisions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecisionTask { ///

A paginated list of history events of the workflow execution. The decider uses this during the processing of the decision task.

#[serde(rename = "events")] @@ -716,7 +715,7 @@ pub struct DecisionTask { ///

Provides the details of the DecisionTaskCompleted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecisionTaskCompletedEventAttributes { ///

User defined context for the workflow execution.

#[serde(rename = "executionContext")] @@ -732,7 +731,7 @@ pub struct DecisionTaskCompletedEventAttributes { ///

Provides details about the DecisionTaskScheduled event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecisionTaskScheduledEventAttributes { ///

The maximum duration for this decision task. The task is considered timed out if it doesn't complete within this duration.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

#[serde(rename = "startToCloseTimeout")] @@ -741,7 +740,7 @@ pub struct DecisionTaskScheduledEventAttributes { ///

The name of the task list in which the decision task was scheduled.

#[serde(rename = "taskList")] pub task_list: TaskList, - ///

A task priority that, if set, specifies the priority for this decision task. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

+ ///

A task priority that, if set, specifies the priority for this decision task. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

#[serde(rename = "taskPriority")] #[serde(skip_serializing_if = "Option::is_none")] pub task_priority: Option, @@ -749,7 +748,7 @@ pub struct DecisionTaskScheduledEventAttributes { ///

Provides the details of the DecisionTaskStarted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecisionTaskStartedEventAttributes { ///

Identity of the decider making the request. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

#[serde(rename = "identity")] @@ -762,7 +761,7 @@ pub struct DecisionTaskStartedEventAttributes { ///

Provides the details of the DecisionTaskTimedOut event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DecisionTaskTimedOutEventAttributes { ///

The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "scheduledEventId")] @@ -841,7 +840,7 @@ pub struct DescribeWorkflowTypeInput { ///

Contains the configuration settings of a domain.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainConfiguration { ///

The retention period for workflow executions in this domain.

#[serde(rename = "workflowExecutionRetentionPeriodInDays")] @@ -850,7 +849,7 @@ pub struct DomainConfiguration { ///

Contains details of a domain.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainDetail { ///

The domain configuration. Currently, this includes only the domain's retention period.

#[serde(rename = "configuration")] @@ -862,8 +861,12 @@ pub struct DomainDetail { ///

Contains general information about a domain.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainInfo { + ///

The ARN of the domain.

+ #[serde(rename = "arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, ///

The description of the domain provided through RegisterDomain.

#[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] @@ -878,7 +881,7 @@ pub struct DomainInfo { ///

Contains a paginated collection of DomainInfo structures.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainInfos { ///

A list of DomainInfo structures.

#[serde(rename = "domainInfos")] @@ -903,7 +906,7 @@ pub struct ExecutionTimeFilter { ///

Provides the details of the ExternalWorkflowExecutionCancelRequested event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExternalWorkflowExecutionCancelRequestedEventAttributes { ///

The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this external workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -915,7 +918,7 @@ pub struct ExternalWorkflowExecutionCancelRequestedEventAttributes { ///

Provides the details of the ExternalWorkflowExecutionSignaled event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ExternalWorkflowExecutionSignaledEventAttributes { ///

The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to request this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "initiatedEventId")] @@ -925,7 +928,7 @@ pub struct ExternalWorkflowExecutionSignaledEventAttributes { pub workflow_execution: WorkflowExecution, } -///

Provides the details of the FailWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+///

Provides the details of the FailWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct FailWorkflowExecutionDecisionAttributes { ///

Details of the failure.

@@ -940,9 +943,9 @@ pub struct FailWorkflowExecutionDecisionAttributes { ///

Provides the details of the FailWorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailWorkflowExecutionFailedEventAttributes { - ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

#[serde(rename = "cause")] pub cause: String, ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

@@ -958,11 +961,11 @@ pub struct GetWorkflowExecutionHistoryInput { ///

Specifies the workflow execution for which to return the history.

#[serde(rename = "execution")] pub execution: WorkflowExecution, - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

#[serde(rename = "nextPageToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_page_token: Option, @@ -974,7 +977,7 @@ pub struct GetWorkflowExecutionHistoryInput { ///

Paginated representation of a workflow history for a workflow execution. This is the up-to-date, complete, and authoritative record of the events related to all tasks and events in the life of the workflow execution.
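Since History comes back page by page, a typical consumer loops on nextPageToken until it is absent. A hedged sketch, assuming the era-appropriate blocking `.sync()` client pattern and a Default derive on the input struct; the client method itself is not part of this diff:

```rust
use rusoto_core::Region;
use rusoto_swf::{
    GetWorkflowExecutionHistoryInput, HistoryEvent, Swf, SwfClient, WorkflowExecution,
};

fn fetch_full_history(
    domain: &str,
    execution: WorkflowExecution,
) -> Result<Vec<HistoryEvent>, Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    let mut events = Vec::new();
    let mut token: Option<String> = None;
    loop {
        let page = client
            .get_workflow_execution_history(GetWorkflowExecutionHistoryInput {
                domain: domain.to_string(),
                execution: execution.clone(),
                next_page_token: token.take(),
                ..Default::default()
            })
            .sync()?;
        events.extend(page.events);
        match page.next_page_token {
            // Tokens expire after 60 seconds, so request the next page promptly.
            Some(t) => token = Some(t),
            None => break,
        }
    }
    Ok(events)
}
```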

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct History { ///

The list of history events.

#[serde(rename = "events")] @@ -987,7 +990,7 @@ pub struct History { ///

Event within a workflow execution. A history event can be one of these types:

  • ActivityTaskCancelRequested – A RequestCancelActivityTask decision was received by the system.

  • ActivityTaskCanceled – The activity task was successfully canceled.

  • ActivityTaskCompleted – An activity worker successfully completed an activity task by calling RespondActivityTaskCompleted.

  • ActivityTaskFailed – An activity worker failed an activity task by calling RespondActivityTaskFailed.

  • ActivityTaskScheduled – An activity task was scheduled for execution.

  • ActivityTaskStarted – The scheduled activity task was dispatched to a worker.

  • ActivityTaskTimedOut – The activity task timed out.

  • CancelTimerFailed – Failed to process CancelTimer decision. This happens when the decision isn't configured properly, for example no timer exists with the specified timer Id.

  • CancelWorkflowExecutionFailed – A request to cancel a workflow execution failed.

  • ChildWorkflowExecutionCanceled – A child workflow execution, started by this workflow execution, was canceled and closed.

  • ChildWorkflowExecutionCompleted – A child workflow execution, started by this workflow execution, completed successfully and was closed.

  • ChildWorkflowExecutionFailed – A child workflow execution, started by this workflow execution, failed to complete successfully and was closed.

  • ChildWorkflowExecutionStarted – A child workflow execution was successfully started.

  • ChildWorkflowExecutionTerminated – A child workflow execution, started by this workflow execution, was terminated.

  • ChildWorkflowExecutionTimedOut – A child workflow execution, started by this workflow execution, timed out and was closed.

  • CompleteWorkflowExecutionFailed – The workflow execution failed to complete.

  • ContinueAsNewWorkflowExecutionFailed – The workflow execution failed to complete after being continued as a new workflow execution.

  • DecisionTaskCompleted – The decider successfully completed a decision task by calling RespondDecisionTaskCompleted.

  • DecisionTaskScheduled – A decision task was scheduled for the workflow execution.

  • DecisionTaskStarted – The decision task was dispatched to a decider.

  • DecisionTaskTimedOut – The decision task timed out.

  • ExternalWorkflowExecutionCancelRequested – Request to cancel an external workflow execution was successfully delivered to the target execution.

  • ExternalWorkflowExecutionSignaled – A signal, requested by this workflow execution, was successfully delivered to the target external workflow execution.

  • FailWorkflowExecutionFailed – A request to mark a workflow execution as failed, itself failed.

  • MarkerRecorded – A marker was recorded in the workflow history as the result of a RecordMarker decision.

  • RecordMarkerFailed – A RecordMarker decision was returned as failed.

  • RequestCancelActivityTaskFailed – Failed to process RequestCancelActivityTask decision. This happens when the decision isn't configured properly.

  • RequestCancelExternalWorkflowExecutionFailed – Request to cancel an external workflow execution failed.

  • RequestCancelExternalWorkflowExecutionInitiated – A request was made to request the cancellation of an external workflow execution.

  • ScheduleActivityTaskFailed – Failed to process ScheduleActivityTask decision. This happens when the decision isn't configured properly, for example the activity type specified isn't registered.

  • SignalExternalWorkflowExecutionFailed – The request to signal an external workflow execution failed.

  • SignalExternalWorkflowExecutionInitiated – A request to signal an external workflow was made.

  • StartActivityTaskFailed – A scheduled activity task failed to start.

  • StartChildWorkflowExecutionFailed – Failed to process StartChildWorkflowExecution decision. This happens when the decision isn't configured properly, for example the workflow type specified isn't registered.

  • StartChildWorkflowExecutionInitiated – A request was made to start a child workflow execution.

  • StartTimerFailed – Failed to process StartTimer decision. This happens when the decision isn't configured properly, for example a timer already exists with the specified timer Id.

  • TimerCanceled – A timer, previously started for this workflow execution, was successfully canceled.

  • TimerFired – A timer, previously started for this workflow execution, fired.

  • TimerStarted – A timer was started for the workflow execution due to a StartTimer decision.

  • WorkflowExecutionCancelRequested – A request to cancel this workflow execution was made.

  • WorkflowExecutionCanceled – The workflow execution was successfully canceled and closed.

  • WorkflowExecutionCompleted – The workflow execution was closed due to successful completion.

  • WorkflowExecutionContinuedAsNew – The workflow execution was closed and a new execution of the same type was created with the same workflowId.

  • WorkflowExecutionFailed – The workflow execution closed due to a failure.

  • WorkflowExecutionSignaled – An external signal was received for the workflow execution.

  • WorkflowExecutionStarted – The workflow execution was started.

  • WorkflowExecutionTerminated – The workflow execution was terminated.

  • WorkflowExecutionTimedOut – The workflow execution was closed because a time out was exceeded.
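Deciders usually dispatch on this event type string and then read the matching *EventAttributes member. A brief sketch; the event_type field is assumed from the crate's shape, since only the attribute members appear in this diff:

```rust
// Illustrative dispatch on a history event's type string.
fn describe(event: &HistoryEvent) -> String {
    match event.event_type.as_str() {
        "WorkflowExecutionStarted" => "execution started".to_string(),
        "ActivityTaskCompleted" => "an activity worker finished a task".to_string(),
        "TimerFired" => "a previously started timer fired".to_string(),
        other => format!("unhandled event type: {}", other),
    }
}
```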

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HistoryEvent { ///

If the event is of type ActivityTaskCancelRequested then this member is set and provides detailed information about the event. It isn't set for other event types.

#[serde(rename = "activityTaskCancelRequestedEventAttributes")] @@ -1249,7 +1252,7 @@ pub struct HistoryEvent { ///

Provides the details of the LambdaFunctionCompleted event. It isn't set for other event types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionCompletedEventAttributes { ///

The results of the Lambda task.

#[serde(rename = "result")] @@ -1265,7 +1268,7 @@ pub struct LambdaFunctionCompletedEventAttributes { ///

Provides the details of the LambdaFunctionFailed event. It isn't set for other event types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionFailedEventAttributes { ///

The details of the failure.

#[serde(rename = "details")] @@ -1285,7 +1288,7 @@ pub struct LambdaFunctionFailedEventAttributes { ///

Provides the details of the LambdaFunctionScheduled event. It isn't set for other event types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionScheduledEventAttributes { ///

Data attached to the event that the decider can use in subsequent workflow tasks. This data isn't sent to the Lambda task.

#[serde(rename = "control")] @@ -1312,7 +1315,7 @@ pub struct LambdaFunctionScheduledEventAttributes { ///

Provides the details of the LambdaFunctionStarted event. It isn't set for other event types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionStartedEventAttributes { ///

The ID of the LambdaFunctionScheduled event that was recorded when this activity task was scheduled. To help diagnose issues, use this information to trace back the chain of events leading up to this event.

#[serde(rename = "scheduledEventId")] @@ -1321,7 +1324,7 @@ pub struct LambdaFunctionStartedEventAttributes { ///

Provides details of the LambdaFunctionTimedOut event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct LambdaFunctionTimedOutEventAttributes { ///

The ID of the LambdaFunctionScheduled event that was recorded when this activity task was scheduled. To help diagnose issues, use this information to trace back the chain of events leading up to this event.

#[serde(rename = "scheduledEventId")] @@ -1340,7 +1343,7 @@ pub struct ListActivityTypesInput { ///

The name of the domain in which the activity types have been registered.

#[serde(rename = "domain")] pub domain: String, - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, @@ -1348,7 +1351,7 @@ pub struct ListActivityTypesInput { #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

#[serde(rename = "nextPageToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_page_token: Option, @@ -1378,11 +1381,11 @@ pub struct ListClosedWorkflowExecutionsInput { #[serde(rename = "executionFilter")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_filter: Option, - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

#[serde(rename = "nextPageToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_page_token: Option, @@ -1406,11 +1409,11 @@ pub struct ListClosedWorkflowExecutionsInput { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListDomainsInput { - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

#[serde(rename = "nextPageToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_page_token: Option, @@ -1432,11 +1435,11 @@ pub struct ListOpenWorkflowExecutionsInput { #[serde(rename = "executionFilter")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_filter: Option, - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

#[serde(rename = "nextPageToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_page_token: Option, @@ -1457,12 +1460,28 @@ pub struct ListOpenWorkflowExecutionsInput { pub type_filter: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceInput { + ///

The Amazon Resource Name (ARN) for the Amazon SWF domain.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceOutput { + ///

An array of tags associated with the domain.

+ #[serde(rename = "tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListWorkflowTypesInput { ///

The name of the domain in which the workflow types have been registered.

#[serde(rename = "domain")] pub domain: String, - ///

The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

+ ///

The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.

#[serde(rename = "maximumPageSize")] #[serde(skip_serializing_if = "Option::is_none")] pub maximum_page_size: Option, @@ -1470,7 +1489,7 @@ pub struct ListWorkflowTypesInput { #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, - ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

+ ///

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".

The configured maximumPageSize determines how many results can be returned in a single call.

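The nextPageToken/maximumPageSize pair drives pagination the same way across the List* inputs. A rough pagination-loop sketch against ListWorkflowTypes, under the same rusoto assumptions as the previous example; the domain name and page size are illustrative.

use rusoto_core::Region;
use rusoto_swf::{ListWorkflowTypesInput, Swf, SwfClient};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let mut next_page_token: Option<String> = None;
    loop {
        let input = ListWorkflowTypesInput {
            domain: "example-domain".to_string(), // hypothetical domain
            registration_status: "REGISTERED".to_string(),
            maximum_page_size: Some(100), // stay well below the 1000 maximum
            next_page_token: next_page_token.clone(),
            ..Default::default()
        };
        let page = client.list_workflow_types(input).sync().expect("call failed");
        for info in page.type_infos {
            println!("{}@{}", info.workflow_type.name, info.workflow_type.version);
        }
        // Tokens expire after 60 seconds, so use them promptly.
        match page.next_page_token {
            Some(token) => next_page_token = Some(token),
            None => break,
        }
    }
}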
@@ -1485,7 +1504,7 @@ pub struct ListWorkflowTypesInput {
 /// Provides the details of the MarkerRecorded event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct MarkerRecordedEventAttributes {
     /// The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarker decision that requested this marker. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     #[serde(rename = "decisionTaskCompletedEventId")]
@@ -1501,7 +1520,7 @@ pub struct MarkerRecordedEventAttributes {
 /// Contains the count of tasks in a task list.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PendingTaskCount {
     /// The number of tasks in the task list.
     #[serde(rename = "count")]
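The recurring cfg_attr change in these hunks is what enables the following sketch: with the new serialize_structs cargo feature, output structs such as PendingTaskCount derive Serialize outside of tests, so callers can dump them to JSON for logging or golden files. serde_json is an assumed extra dependency here.

use rusoto_swf::PendingTaskCount;

fn main() {
    let pending = PendingTaskCount {
        count: 42,
        truncated: Some(false),
    };
    // Only compiles with: rusoto_swf = { version = "...", features = ["serialize_structs"] }
    println!("{}", serde_json::to_string(&pending).unwrap());
}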
@@ -1521,7 +1540,7 @@ pub struct PollForActivityTaskInput {
     #[serde(rename = "identity")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity: Option<String>,
-    /// Specifies the task list to poll for activity tasks.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// Specifies the task list to poll for activity tasks.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "taskList")]
     pub task_list: TaskList,
 }
@@ -1535,11 +1554,11 @@ pub struct PollForDecisionTaskInput {
     #[serde(rename = "identity")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub identity: Option<String>,
-    /// The maximum number of results that are returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
-    /// This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
+    /// The maximum number of results that are returned per call. Use nextPageToken to obtain further pages of results.
+    /// This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
     #[serde(rename = "maximumPageSize")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub maximum_page_size: Option<i64>,
-    /// If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.
-    /// The configured maximumPageSize determines how many results can be returned in a single call.
-    /// The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken doesn't return a new decision task.
+    /// If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: "Specified token has exceeded its maximum lifetime".
+    /// The configured maximumPageSize determines how many results can be returned in a single call.
+    /// The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken doesn't return a new decision task.
     #[serde(rename = "nextPageToken")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub next_page_token: Option<String>,
@@ -1547,7 +1566,7 @@ pub struct PollForDecisionTaskInput {
     #[serde(rename = "reverseOrder")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub reverse_order: Option<bool>,
-    /// Specifies the task list to poll for decision tasks.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// Specifies the task list to poll for decision tasks.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "taskList")]
     pub task_list: TaskList,
 }
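A hedged sketch of polling for a decision task with the input struct above; the empty-task-token check follows the documented 60-second long-poll behavior, and all names are illustrative.

use rusoto_core::Region;
use rusoto_swf::{PollForDecisionTaskInput, Swf, SwfClient, TaskList};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let input = PollForDecisionTaskInput {
        domain: "example-domain".to_string(),    // hypothetical domain
        identity: Some("decider-1".to_string()), // recorded in history events
        task_list: TaskList { name: "main-task-list".to_string() },
        maximum_page_size: Some(100),
        ..Default::default()
    };
    let task = client.poll_for_decision_task(input).sync().expect("poll failed");
    // An empty task token means the long poll timed out with no work to do.
    if !task.task_token.is_empty() {
        println!("got decision task with {} history events", task.events.len());
    }
}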
@@ -1563,7 +1582,7 @@ pub struct RecordActivityTaskHeartbeatInput {
     pub task_token: String,
 }
 
-/// Provides the details of the RecordMarker decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • You cannot use an IAM policy to constrain this action's parameters.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the RecordMarker decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • You cannot use an IAM policy to constrain this action's parameters.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct RecordMarkerDecisionAttributes {
     /// The details of the marker.
@@ -1577,9 +1596,9 @@ pub struct RecordMarkerDecisionAttributes {
 /// Provides the details of the RecordMarkerFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RecordMarkerFailedEventAttributes {
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarkerFailed decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
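As a usage sketch, a decider records a marker by returning a RecordMarker decision from RespondDecisionTaskCompleted. This assumes rusoto's generated Decision struct (a decision_type string plus per-type attribute fields), which is not part of this hunk; the token is whatever PollForDecisionTask returned.

use rusoto_core::Region;
use rusoto_swf::{
    Decision, RecordMarkerDecisionAttributes, RespondDecisionTaskCompletedInput, Swf, SwfClient,
};

fn record_progress_marker(task_token: String) {
    let client = SwfClient::new(Region::UsEast1);
    let decision = Decision {
        decision_type: "RecordMarker".to_string(),
        record_marker_decision_attributes: Some(RecordMarkerDecisionAttributes {
            marker_name: "progress".to_string(),
            details: Some("step-2-complete".to_string()),
        }),
        ..Default::default()
    };
    client
        .respond_decision_task_completed(RespondDecisionTaskCompletedInput {
            task_token,
            decisions: Some(vec![decision]),
            ..Default::default()
        })
        .sync()
        .expect("RespondDecisionTaskCompleted failed");
}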
@@ -1600,7 +1619,7 @@ pub struct RegisterActivityTypeInput {
     #[serde(rename = "defaultTaskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_list: Option<TaskList>,
-    /// The default task priority to assign to the activity type. If not assigned, then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the in the Amazon SWF Developer Guide..
+    /// The default task priority to assign to the activity type. If not assigned, then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "defaultTaskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_priority: Option<String>,
@@ -1623,10 +1642,10 @@ pub struct RegisterActivityTypeInput {
     /// The name of the domain in which this activity is to be registered.
     #[serde(rename = "domain")]
     pub domain: String,
-    /// The name of the activity type within the domain.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The name of the activity type within the domain.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "name")]
     pub name: String,
-    /// The version of the activity type.
-    /// The activity type consists of the name and version, the combination of which must be unique within the domain.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The version of the activity type.
+    /// The activity type consists of the name and version, the combination of which must be unique within the domain.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "version")]
     pub version: String,
 }
@@ -1637,10 +1656,14 @@ pub struct RegisterDomainInput {
     #[serde(rename = "description")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
-    /// Name of the domain to register. The name must be unique in the region that the domain is registered in.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// Name of the domain to register. The name must be unique in the region that the domain is registered in.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "name")]
     pub name: String,
-    /// The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution isn't available in the results of visibility calls.
-    /// If you pass the value NONE or 0 (zero), then the workflow execution history isn't retained. As soon as the workflow execution completes, the execution record and its history are deleted.
-    /// The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: Amazon SWF Service Limits in the Amazon SWF Developer Guide.
+    /// Tags to be added when registering a domain.
+    /// Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.
+    #[serde(rename = "tags")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<Vec<ResourceTag>>,
+    /// The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution isn't available in the results of visibility calls.
+    /// If you pass the value NONE or 0 (zero), then the workflow execution history isn't retained. As soon as the workflow execution completes, the execution record and its history are deleted.
+    /// The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: Amazon SWF Service Limits in the Amazon SWF Developer Guide.
     #[serde(rename = "workflowExecutionRetentionPeriodInDays")]
     pub workflow_execution_retention_period_in_days: String,
 }
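A sketch of registering a domain with the new tags field, using only the fields shown in this struct; names and values are placeholders.

use rusoto_core::Region;
use rusoto_swf::{RegisterDomainInput, ResourceTag, Swf, SwfClient};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let input = RegisterDomainInput {
        name: "example-domain".to_string(), // hypothetical domain name
        description: Some("demo domain".to_string()),
        // Retention is a string per the SWF API; "NONE" or "0" disables history retention.
        workflow_execution_retention_period_in_days: "30".to_string(),
        // Tags can now be attached at registration time.
        tags: Some(vec![ResourceTag {
            key: "team".to_string(),
            value: Some("payments".to_string()),
        }]),
    };
    client.register_domain(input).sync().expect("RegisterDomain failed");
}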
@@ -1655,7 +1678,7 @@ pub struct RegisterWorkflowTypeInput {
     #[serde(rename = "defaultExecutionStartToCloseTimeout")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_execution_start_to_close_timeout: Option<String>,
-    /// The default IAM role attached to this workflow type.
-    /// Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when you start this workflow type, the default Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.
+    /// The default IAM role attached to this workflow type.
+    /// Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when you start this workflow type, the default Lambda role is attached to the execution. For more information, see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.
     #[serde(rename = "defaultLambdaRole")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_lambda_role: Option<String>,
@@ -1663,7 +1686,7 @@ pub struct RegisterWorkflowTypeInput {
     #[serde(rename = "defaultTaskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_list: Option<TaskList>,
-    /// The default task priority to assign to the workflow type. If not assigned, then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
+    /// The default task priority to assign to the workflow type. If not assigned, then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "defaultTaskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub default_task_priority: Option<String>,
@@ -1678,15 +1701,15 @@ pub struct RegisterWorkflowTypeInput {
     /// The name of the domain in which to register the workflow type.
     #[serde(rename = "domain")]
     pub domain: String,
-    /// The name of the workflow type.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The name of the workflow type.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "name")]
     pub name: String,
-    /// The version of the workflow type.
-    /// The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The version of the workflow type.
+    /// The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "version")]
     pub version: String,
 }
-/// Provides the details of the RequestCancelActivityTask decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • You cannot use an IAM policy to constrain this action's parameters.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the RequestCancelActivityTask decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • You cannot use an IAM policy to constrain this action's parameters.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct RequestCancelActivityTaskDecisionAttributes {
     /// The activityId of the activity task to be canceled.
@@ -1696,12 +1719,12 @@ pub struct RequestCancelActivityTaskDecisionAttributes {
 /// Provides the details of the RequestCancelActivityTaskFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RequestCancelActivityTaskFailedEventAttributes {
     /// The activityId provided in the RequestCancelActivityTask decision that failed.
     #[serde(rename = "activityId")]
     pub activity_id: String,
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelActivityTask decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
@@ -1709,7 +1732,7 @@ pub struct RequestCancelActivityTaskFailedEventAttributes {
     pub decision_task_completed_event_id: i64,
 }
 
-/// Provides the details of the RequestCancelExternalWorkflowExecution decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • You cannot use an IAM policy to constrain this action's parameters.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the RequestCancelExternalWorkflowExecution decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • You cannot use an IAM policy to constrain this action's parameters.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct RequestCancelExternalWorkflowExecutionDecisionAttributes {
     /// The data attached to the event that can be used by the decider in subsequent workflow tasks.
@@ -1727,9 +1750,9 @@ pub struct RequestCancelExternalWorkflowExecutionDecisionAttributes {
 /// Provides the details of the RequestCancelExternalWorkflowExecutionFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RequestCancelExternalWorkflowExecutionFailedEventAttributes {
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The data attached to the event that the decider can use in subsequent workflow tasks. This data isn't sent to the workflow execution.
@@ -1753,7 +1776,7 @@ pub struct RequestCancelExternalWorkflowExecutionFailedEventAttributes {
 /// Provides the details of the RequestCancelExternalWorkflowExecutionInitiated event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {
     /// Data attached to the event that can be used by the decider in subsequent workflow tasks.
     #[serde(rename = "control")]
@@ -1785,6 +1808,18 @@ pub struct RequestCancelWorkflowExecutionInput {
     pub workflow_id: String,
 }
 
+/// Tags are key-value pairs that can be associated with Amazon SWF state machines and activities.
+/// Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ResourceTag {
+    /// The key of a tag.
+    #[serde(rename = "key")]
+    pub key: String,
+    /// The value of a tag.
+    #[serde(rename = "value")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct RespondActivityTaskCanceledInput {
     /// Information about the cancellation.
@@ -1840,7 +1875,7 @@ pub struct RespondDecisionTaskCompletedInput {
 /// Specifies the runId of a workflow execution.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Run {
     /// The runId of a workflow execution. This ID is generated by the service and can be used to uniquely identify the workflow execution within a domain.
     #[serde(rename = "runId")]
@@ -1848,7 +1883,7 @@ pub struct Run {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub run_id: Option<String>,
 }
 
-/// Provides the details of the ScheduleActivityTask decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • Constrain the following parameters by using a Condition element with the appropriate keys.
-///     • activityType.name – String constraint. The key is swf:activityType.name.
-///     • activityType.version – String constraint. The key is swf:activityType.version.
-///     • taskList – String constraint. The key is swf:taskList.name.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the ScheduleActivityTask decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • Constrain the following parameters by using a Condition element with the appropriate keys.
+///     • activityType.name – String constraint. The key is swf:activityType.name.
+///     • activityType.version – String constraint. The key is swf:activityType.version.
+///     • taskList – String constraint. The key is swf:taskList.name.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ScheduleActivityTaskDecisionAttributes {
     /// The activityId of the activity task.
     /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
@@ -1885,7 +1920,7 @@ pub struct ScheduleActivityTaskDecisionAttributes {
     #[serde(rename = "taskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_list: Option<TaskList>,
-    /// If set, specifies the priority with which the activity task is to be assigned to a worker. This overrides the defaultTaskPriority specified when registering the activity type using RegisterActivityType. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
+    /// If set, specifies the priority with which the activity task is to be assigned to a worker. This overrides the defaultTaskPriority specified when registering the activity type using RegisterActivityType. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "taskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_priority: Option<String>,
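A sketch of filling these attributes for a ScheduleActivityTask decision; identifiers and timeouts are illustrative, and ..Default::default() relies on the derived Default shown above.

use rusoto_swf::{ActivityType, ScheduleActivityTaskDecisionAttributes, TaskList};

fn make_schedule_decision_attributes() -> ScheduleActivityTaskDecisionAttributes {
    ScheduleActivityTaskDecisionAttributes {
        activity_id: "resize-image-42".to_string(),
        activity_type: ActivityType {
            name: "resize-image".to_string(),
            version: "1".to_string(),
        },
        input: Some("s3://bucket/key".to_string()),
        task_list: Some(TaskList { name: "image-workers".to_string() }),
        // Higher numbers mean higher priority; the value is sent as a string.
        task_priority: Some("10".to_string()),
        start_to_close_timeout: Some("300".to_string()),
        ..Default::default()
    }
}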
@@ -1893,7 +1928,7 @@ pub struct ScheduleActivityTaskDecisionAttributes {
 /// Provides the details of the ScheduleActivityTaskFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ScheduleActivityTaskFailedEventAttributes {
     /// The activityId provided in the ScheduleActivityTask decision that failed.
     #[serde(rename = "activityId")]
@@ -1901,7 +1936,7 @@ pub struct ScheduleActivityTaskFailedEventAttributes {
     /// The activity type provided in the ScheduleActivityTask decision that failed.
     #[serde(rename = "activityType")]
     pub activity_type: ActivityType,
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
@@ -1934,9 +1969,9 @@ pub struct ScheduleLambdaFunctionDecisionAttributes {
 /// Provides the details of the ScheduleLambdaFunctionFailed event. It isn't set for other event types.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ScheduleLambdaFunctionFailedEventAttributes {
-    /// The cause of the failure. To help diagnose issues, use this information to trace back the chain of events leading up to this event.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. To help diagnose issues, use this information to trace back the chain of events leading up to this event.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The ID of the LambdaFunctionCompleted event corresponding to the decision that resulted in scheduling this Lambda task. To help diagnose issues, use this information to trace back the chain of events leading up to this event.
@@ -1950,7 +1985,7 @@ pub struct ScheduleLambdaFunctionFailedEventAttributes {
     pub name: String,
 }
 
-/// Provides the details of the SignalExternalWorkflowExecution decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • You cannot use an IAM policy to constrain this action's parameters.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the SignalExternalWorkflowExecution decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • You cannot use an IAM policy to constrain this action's parameters.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct SignalExternalWorkflowExecutionDecisionAttributes {
     /// The data attached to the event that can be used by the decider in subsequent decision tasks.
@@ -1975,9 +2010,9 @@ pub struct SignalExternalWorkflowExecutionDecisionAttributes {
 /// Provides the details of the SignalExternalWorkflowExecutionFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SignalExternalWorkflowExecutionFailedEventAttributes {
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The data attached to the event that the decider can use in subsequent workflow tasks. This data isn't sent to the workflow execution.
@@ -2001,7 +2036,7 @@ pub struct SignalExternalWorkflowExecutionFailedEventAttributes {
 /// Provides the details of the SignalExternalWorkflowExecutionInitiated event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct SignalExternalWorkflowExecutionInitiatedEventAttributes {
     /// Data attached to the event that can be used by the decider in subsequent decision tasks.
     #[serde(rename = "control")]
@@ -2047,7 +2082,7 @@ pub struct SignalWorkflowExecutionInput {
     pub workflow_id: String,
 }
 
-/// Provides the details of the StartChildWorkflowExecution decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • Constrain the following parameters by using a Condition element with the appropriate keys.
-///     • tagList.member.N – The key is "swf:tagList.N" where N is the tag number from 0 to 4, inclusive.
-///     • taskList – String constraint. The key is swf:taskList.name.
-///     • workflowType.name – String constraint. The key is swf:workflowType.name.
-///     • workflowType.version – String constraint. The key is swf:workflowType.version.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the StartChildWorkflowExecution decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • Constrain the following parameters by using a Condition element with the appropriate keys.
+///     • tagList.member.N – The key is "swf:tagList.N" where N is the tag number from 0 to 4, inclusive.
+///     • taskList – String constraint. The key is swf:taskList.name.
+///     • workflowType.name – String constraint. The key is swf:workflowType.name.
+///     • workflowType.version – String constraint. The key is swf:workflowType.version.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct StartChildWorkflowExecutionDecisionAttributes {
     /// If set, specifies the policy to use for the child workflow executions if the workflow execution being started is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.
     /// The supported child policies are:
     ///   • TERMINATE – The child executions are terminated.
     ///   • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
     ///   • ABANDON – No action is taken. The child executions continue to run.
     /// A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.
@@ -2078,7 +2113,7 @@ pub struct StartChildWorkflowExecutionDecisionAttributes {
     #[serde(rename = "taskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_list: Option<TaskList>,
-    /// A task priority that, if set, specifies the priority for a decision task of this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
+    /// A task priority that, if set, specifies the priority for a decision task of this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "taskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_priority: Option<String>,
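A sketch of the corresponding attributes for starting a child workflow with an explicit child policy and task priority; identifiers are illustrative.

use rusoto_swf::{StartChildWorkflowExecutionDecisionAttributes, WorkflowType};

fn make_child_workflow_attributes() -> StartChildWorkflowExecutionDecisionAttributes {
    StartChildWorkflowExecutionDecisionAttributes {
        workflow_id: "child-billing-run-7".to_string(),
        workflow_type: WorkflowType {
            name: "billing-run".to_string(),
            version: "2".to_string(),
        },
        // One of TERMINATE, REQUEST_CANCEL, or ABANDON, per the doc comment above.
        child_policy: Some("REQUEST_CANCEL".to_string()),
        task_priority: Some("5".to_string()),
        ..Default::default()
    }
}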
@@ -2096,9 +2131,9 @@ pub struct StartChildWorkflowExecutionDecisionAttributes {
 /// Provides the details of the StartChildWorkflowExecutionFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartChildWorkflowExecutionFailedEventAttributes {
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// When cause is set to OPERATION_NOT_PERMITTED, the decision fails because it lacks sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// When cause is set to OPERATION_NOT_PERMITTED, the decision fails because it lacks sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The data attached to the event that the decider can use in subsequent workflow tasks. This data isn't sent to the child workflow execution.
@@ -2121,7 +2156,7 @@ pub struct StartChildWorkflowExecutionFailedEventAttributes {
 /// Provides the details of the StartChildWorkflowExecutionInitiated event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartChildWorkflowExecutionInitiatedEventAttributes {
     /// The policy to use for the child workflow executions if this execution gets terminated by explicitly calling the TerminateWorkflowExecution action or due to an expired timeout.
     /// The supported child policies are:
     ///   • TERMINATE – The child executions are terminated.
     ///   • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
     ///   • ABANDON – No action is taken. The child executions continue to run.
     #[serde(rename = "childPolicy")]
@@ -2152,7 +2187,7 @@ pub struct StartChildWorkflowExecutionInitiatedEventAttributes {
     /// The name of the task list used for the decision tasks of the child workflow execution.
     #[serde(rename = "taskList")]
     pub task_list: TaskList,
-    /// The priority assigned for the decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
+    /// The priority assigned for the decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "taskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_priority: Option<String>,
@@ -2170,9 +2205,9 @@ pub struct StartChildWorkflowExecutionInitiatedEventAttributes {
 /// Provides the details of the StartLambdaFunctionFailed event. It isn't set for other event types.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartLambdaFunctionFailedEventAttributes {
-    /// The cause of the failure. To help diagnose issues, use this information to trace back the chain of events leading up to this event.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because the IAM role attached to the execution lacked sufficient permissions. For details and example IAM policies, see Lambda Tasks in the Amazon SWF Developer Guide.
+    /// The cause of the failure. To help diagnose issues, use this information to trace back the chain of events leading up to this event.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because the IAM role attached to the execution lacked sufficient permissions. For details and example IAM policies, see Lambda Tasks in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub cause: Option<String>,
@@ -2186,7 +2221,7 @@ pub struct StartLambdaFunctionFailedEventAttributes {
     pub scheduled_event_id: Option<i64>,
 }
 
-/// Provides the details of the StartTimer decision.
-/// Access Control
-/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
-///   • Use a Resource element with the domain name to limit the action to only specified domains.
-///   • Use an Action element to allow or deny permission to call this action.
-///   • You cannot use an IAM policy to constrain this action's parameters.
-/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+/// Provides the details of the StartTimer decision.
+/// Access Control
+/// You can use IAM policies to control this decision's access to Amazon SWF resources as follows:
+///   • Use a Resource element with the domain name to limit the action to only specified domains.
+///   • Use an Action element to allow or deny permission to call this action.
+///   • You cannot use an IAM policy to constrain this action's parameters.
+/// If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct StartTimerDecisionAttributes {
     /// The data attached to the event that can be used by the decider in subsequent workflow tasks.
@@ -2203,9 +2238,9 @@ pub struct StartTimerDecisionAttributes {
 /// Provides the details of the StartTimerFailed event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct StartTimerFailedEventAttributes {
-    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
-    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
+    /// The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.
+    /// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
     #[serde(rename = "cause")]
     pub cause: String,
     /// The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartTimer decision for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
@@ -2233,7 +2268,7 @@ pub struct StartWorkflowExecutionInput {
     #[serde(rename = "input")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub input: Option<String>,
-    /// The IAM role to attach to this workflow execution.
-    /// Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't attach an IAM role, any attempt to schedule a Lambda task fails. This results in a ScheduleLambdaFunctionFailed history event. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.
+    /// The IAM role to attach to this workflow execution.
+    /// Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't attach an IAM role, any attempt to schedule a Lambda task fails. This results in a ScheduleLambdaFunctionFailed history event. For more information, see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.
     #[serde(rename = "lambdaRole")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub lambda_role: Option<String>,
@@ -2241,11 +2276,11 @@ pub struct StartWorkflowExecutionInput {
     #[serde(rename = "tagList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tag_list: Option<Vec<String>>,
-    /// The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.
-    /// A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.
+    /// A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "taskList")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_list: Option<TaskList>,
-    /// The task priority to use for this workflow execution. This overrides any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
-    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
+    /// The task priority to use for this workflow execution. This overrides any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type is used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+    /// For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.
     #[serde(rename = "taskPriority")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_priority: Option<String>,
@@ -2253,7 +2288,7 @@ pub struct StartWorkflowExecutionInput {
     #[serde(rename = "taskStartToCloseTimeout")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub task_start_to_close_timeout: Option<String>,
-    /// The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time.
-    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not contain the literal string arn.
+    /// The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time within the same domain.
+    /// The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it must not be the literal string arn.
     #[serde(rename = "workflowId")]
     pub workflow_id: String,
     /// The type of the workflow to start.
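A sketch of starting an execution with the fields above; note that workflowId uniqueness applies to open executions within the domain. Names and values are placeholders, under the same rusoto assumptions as earlier examples.

use rusoto_core::Region;
use rusoto_swf::{StartWorkflowExecutionInput, Swf, SwfClient, TaskList, WorkflowType};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let input = StartWorkflowExecutionInput {
        domain: "example-domain".to_string(),
        // Must be unique among open executions in this domain.
        workflow_id: "order-12345".to_string(),
        workflow_type: WorkflowType {
            name: "process-order".to_string(),
            version: "1".to_string(),
        },
        task_list: Some(TaskList { name: "orders".to_string() }),
        task_priority: Some("0".to_string()),
        input: Some("{\"orderId\":12345}".to_string()),
        ..Default::default()
    };
    let run = client.start_workflow_execution(input).sync().expect("start failed");
    println!("started run: {:?}", run.run_id);
}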
@@ -2264,11 +2299,21 @@ pub struct StartWorkflowExecutionInput {
 /// Used to filter the workflow executions in visibility APIs based on a tag.
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct TagFilter {
-    /// Specifies the tag that must be associated with the execution for it to meet the filter criteria.
+    /// Specifies the tag that must be associated with the execution for it to meet the filter criteria.
+    /// Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.
     #[serde(rename = "tag")]
     pub tag: String,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct TagResourceInput {
+    /// The Amazon Resource Name (ARN) for the Amazon SWF domain.
+    #[serde(rename = "resourceArn")]
+    pub resource_arn: String,
+    /// The list of tags to add to a domain.
+    /// Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.
+    #[serde(rename = "tags")]
+    pub tags: Vec<ResourceTag>,
+}
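A sketch of tagging an existing domain via the new TagResource operation, under the same rusoto assumptions as earlier examples; the ARN and tag are placeholders.

use rusoto_core::Region;
use rusoto_swf::{ResourceTag, Swf, SwfClient, TagResourceInput};

fn main() {
    let client = SwfClient::new(Region::UsEast1);
    let input = TagResourceInput {
        resource_arn: "arn:aws:swf:us-east-1:123456789012:/domain/example".to_string(),
        tags: vec![ResourceTag {
            key: "env".to_string(),
            value: Some("staging".to_string()),
        }],
    };
    client.tag_resource(input).sync().expect("TagResource failed");
}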
+
 /// Represents a task list.
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct TaskList {
@@ -2305,7 +2350,7 @@ pub struct TerminateWorkflowExecutionInput {
 /// Provides the details of the TimerCanceled event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TimerCanceledEventAttributes {
     /// The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     #[serde(rename = "decisionTaskCompletedEventId")]
@@ -2320,7 +2365,7 @@ pub struct TimerCanceledEventAttributes {
 /// Provides the details of the TimerFired event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TimerFiredEventAttributes {
     /// The ID of the TimerStarted event that was recorded when this timer was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     #[serde(rename = "startedEventId")]
@@ -2332,7 +2377,7 @@ pub struct TimerFiredEventAttributes {
 /// Provides the details of the TimerStarted event.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TimerStartedEventAttributes {
     /// Data attached to the event that can be used by the decider in subsequent workflow tasks.
#[serde(rename = "control")] @@ -2349,6 +2394,43 @@ pub struct TimerStartedEventAttributes { pub timer_id: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UndeprecateActivityTypeInput { + ///

The activity type to undeprecate.

+ #[serde(rename = "activityType")] + pub activity_type: ActivityType, + ///

The name of the domain of the deprecated activity type.

+ #[serde(rename = "domain")] + pub domain: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UndeprecateDomainInput { + ///

The name of the deprecated domain to undeprecate.

+ #[serde(rename = "name")] + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UndeprecateWorkflowTypeInput { + ///

The name of the domain of the deprecated workflow type.

+ #[serde(rename = "domain")] + pub domain: String, + ///

The workflow type to undeprecate.

+ #[serde(rename = "workflowType")] + pub workflow_type: WorkflowType, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceInput { + ///

The Amazon Resource Name (ARN) for the Amazon SWF domain.

+ #[serde(rename = "resourceArn")] + pub resource_arn: String, + ///

The list of tags to remove from the Amazon SWF domain.

+ #[serde(rename = "tagKeys")] + pub tag_keys: Vec, +} + ///

Represents a workflow execution.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct WorkflowExecution { @@ -2362,7 +2444,7 @@ pub struct WorkflowExecution { ///

Provides the details of the WorkflowExecutionCancelRequested event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionCancelRequestedEventAttributes { ///

If set, indicates that the request to cancel the workflow execution was automatically generated, and specifies the cause. This happens if the parent workflow execution times out or is terminated, and the child policy is set to cancel child executions.

#[serde(rename = "cause")] @@ -2380,7 +2462,7 @@ pub struct WorkflowExecutionCancelRequestedEventAttributes { ///

Provides the details of the WorkflowExecutionCanceled event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionCanceledEventAttributes { ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "decisionTaskCompletedEventId")] @@ -2393,7 +2475,7 @@ pub struct WorkflowExecutionCanceledEventAttributes { ///

Provides the details of the WorkflowExecutionCompleted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionCompletedEventAttributes { ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "decisionTaskCompletedEventId")] @@ -2406,7 +2488,7 @@ pub struct WorkflowExecutionCompletedEventAttributes { ///

The configuration settings for a workflow execution including timeout values, task list, etc. These configuration settings are determined from the defaults specified when registering the workflow type and those specified when starting the workflow execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionConfiguration { ///

The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

#[serde(rename = "childPolicy")] @@ -2421,7 +2503,7 @@ pub struct WorkflowExecutionConfiguration { ///

The task list used for the decision tasks generated for this workflow execution.

#[serde(rename = "taskList")] pub task_list: TaskList, - ///

The priority assigned to decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

+ ///

The priority assigned to decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

#[serde(rename = "taskPriority")] #[serde(skip_serializing_if = "Option::is_none")] pub task_priority: Option, @@ -2432,7 +2514,7 @@ pub struct WorkflowExecutionConfiguration { ///

Provides the details of the WorkflowExecutionContinuedAsNew event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionContinuedAsNewEventAttributes { ///

The policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

#[serde(rename = "childPolicy")] @@ -2477,7 +2559,7 @@ pub struct WorkflowExecutionContinuedAsNewEventAttributes { ///

Contains the count of workflow executions returned from CountOpenWorkflowExecutions or CountClosedWorkflowExecutions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionCount { ///

The number of workflow executions.

#[serde(rename = "count")] @@ -2490,7 +2572,7 @@ pub struct WorkflowExecutionCount { ///

Contains details about a workflow execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionDetail { ///

The configuration settings for this workflow execution including timeout values, task list, etc.

#[serde(rename = "executionConfiguration")] @@ -2513,7 +2595,7 @@ pub struct WorkflowExecutionDetail { ///

Provides the details of the WorkflowExecutionFailed event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionFailedEventAttributes { ///

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

#[serde(rename = "decisionTaskCompletedEventId")] @@ -2538,7 +2620,7 @@ pub struct WorkflowExecutionFilter { ///

Contains information about a workflow execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionInfo { ///

Set to true if a cancellation is requested for this workflow execution.

#[serde(rename = "cancelRequested")] @@ -2576,7 +2658,7 @@ pub struct WorkflowExecutionInfo { ///

Contains a paginated list of information about workflow executions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionInfos { ///

The list of workflow information structures.

#[serde(rename = "executionInfos")] @@ -2589,7 +2671,7 @@ pub struct WorkflowExecutionInfos { ///

Contains the counts of open tasks, child workflow executions and timers for a workflow execution.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionOpenCounts { ///

The count of activity tasks whose status is OPEN.

#[serde(rename = "openActivityTasks")] @@ -2611,7 +2693,7 @@ pub struct WorkflowExecutionOpenCounts { ///

Provides the details of the WorkflowExecutionSignaled event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionSignaledEventAttributes { ///

The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflow decision to signal this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event. This field is set only if the signal was initiated by another workflow execution.

#[serde(rename = "externalInitiatedEventId")] @@ -2632,7 +2714,7 @@ pub struct WorkflowExecutionSignaledEventAttributes { ///

Provides the details of the WorkflowExecutionStarted event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionStartedEventAttributes { ///

The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

#[serde(rename = "childPolicy")] @@ -2683,7 +2765,7 @@ pub struct WorkflowExecutionStartedEventAttributes { ///

Provides the details of the WorkflowExecutionTerminated event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionTerminatedEventAttributes { ///

If set, indicates that the workflow execution was automatically terminated, and specifies the cause. This happens if the parent workflow execution times out or is terminated and the child policy is set to terminate child executions.

#[serde(rename = "cause")] @@ -2704,7 +2786,7 @@ pub struct WorkflowExecutionTerminatedEventAttributes { ///

Provides the details of the WorkflowExecutionTimedOut event.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowExecutionTimedOutEventAttributes { ///

The policy used for the child workflow executions of this workflow execution.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

#[serde(rename = "childPolicy")] @@ -2727,7 +2809,7 @@ pub struct WorkflowType { ///

The configuration settings of a workflow type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowTypeConfiguration { ///

The default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution Decision.

The supported child policies are:

  • TERMINATE – The child executions are terminated.

  • REQUEST_CANCEL – A request to cancel is attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.

  • ABANDON – No action is taken. The child executions continue to run.

#[serde(rename = "defaultChildPolicy")] @@ -2737,7 +2819,7 @@ pub struct WorkflowTypeConfiguration { #[serde(rename = "defaultExecutionStartToCloseTimeout")] #[serde(skip_serializing_if = "Option::is_none")] pub default_execution_start_to_close_timeout: Option, - ///

The default IAM role attached to this workflow type.

Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when starting this workflow type, the default Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.

+ ///

The default IAM role attached to this workflow type.

Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when starting this workflow type, the default Lambda role is attached to the execution. For more information, see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html in the Amazon SWF Developer Guide.

#[serde(rename = "defaultLambdaRole")] #[serde(skip_serializing_if = "Option::is_none")] pub default_lambda_role: Option, @@ -2745,7 +2827,7 @@ pub struct WorkflowTypeConfiguration { #[serde(rename = "defaultTaskList")] #[serde(skip_serializing_if = "Option::is_none")] pub default_task_list: Option, - ///

The default task priority, specified when registering the workflow type, for all decision tasks of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

+ ///

The default task priority, specified when registering the workflow type, for all decision tasks of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

For more information about setting task priority, see Setting Task Priority in the Amazon SWF Developer Guide.

#[serde(rename = "defaultTaskPriority")] #[serde(skip_serializing_if = "Option::is_none")] pub default_task_priority: Option, @@ -2757,7 +2839,7 @@ pub struct WorkflowTypeConfiguration { ///

Contains details about a workflow type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowTypeDetail { ///

Configuration settings of the workflow type registered through RegisterWorkflowType.

#[serde(rename = "configuration")] @@ -2781,7 +2863,7 @@ pub struct WorkflowTypeFilter { ///

Contains information about a workflow type.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowTypeInfo { ///

The date when this type was registered.

#[serde(rename = "creationDate")] @@ -2804,7 +2886,7 @@ pub struct WorkflowTypeInfo { ///

Contains a paginated list of information structures about workflow types.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkflowTypeInfos { ///

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

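One change worth calling out before the error types: every output struct above moved from #[cfg_attr(test, derive(Serialize))] to #[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))], so downstream crates can opt in to serializing responses instead of that being test-only. A minimal sketch, assuming rusoto_swf is built with features = ["serialize_structs"] and serde_json is available as a dependency:

use rusoto_swf::WorkflowExecutionCount;

fn dump_count(count: &WorkflowExecutionCount) -> serde_json::Result<String> {
    // Only compiles when the serialize_structs feature adds
    // derive(Serialize) to output structs such as WorkflowExecutionCount.
    serde_json::to_string_pretty(count)
}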
#[serde(rename = "nextPageToken")] @@ -3527,6 +3609,57 @@ impl Error for ListOpenWorkflowExecutionsError { } } } +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + ///

Returned by any operation if a system imposed limitation has been reached. To address this fault you should either clean up unused resources or increase the limit by contacting AWS.

+ LimitExceededFault(String), + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "LimitExceededFault" => { + return RusotoError::Service(ListTagsForResourceError::LimitExceededFault( + err.msg, + )) + } + "OperationNotPermittedFault" => { + return RusotoError::Service( + ListTagsForResourceError::OperationNotPermittedFault(err.msg), + ) + } + "UnknownResourceFault" => { + return RusotoError::Service(ListTagsForResourceError::UnknownResourceFault( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::LimitExceededFault(ref cause) => cause, + ListTagsForResourceError::OperationNotPermittedFault(ref cause) => cause, + ListTagsForResourceError::UnknownResourceFault(ref cause) => cause, + } + } +} /// Errors returned by ListWorkflowTypes #[derive(Debug, PartialEq)] pub enum ListWorkflowTypesError { @@ -3724,7 +3857,7 @@ pub enum RegisterActivityTypeError { LimitExceededFault(String), ///
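This from_response shape repeats for every error type in the file: parse the JSON error envelope, map known fault names onto typed variants, route ValidationException to RusotoError::Validation, and fall through to RusotoError::Unknown. A caller-side sketch of consuming it (the message formatting is illustrative only):

use rusoto_core::RusotoError;
use rusoto_swf::ListTagsForResourceError;

fn explain(err: RusotoError<ListTagsForResourceError>) -> String {
    match err {
        RusotoError::Service(ListTagsForResourceError::UnknownResourceFault(msg)) => {
            format!("no such domain: {}", msg)
        }
        RusotoError::Service(other) => format!("SWF fault: {:?}", other),
        RusotoError::Validation(msg) => format!("invalid request: {}", msg),
        other => format!("transport or credential error: {:?}", other),
    }
}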

Returned when the caller doesn't have sufficient permissions to invoke the action.

OperationNotPermittedFault(String), - ///

Returned if the type already exists in the specified domain. You get this fault even if the existing type is in deprecated status. You can specify another version if the intent is to create a new distinct version of the type.

+ ///

Returned if the type already exists in the specified domain. You may get this fault if you are registering a type that is either already registered or deprecated, or if you undeprecate a type that is currently registered.

TypeAlreadyExistsFault(String), ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

UnknownResourceFault(String), @@ -3779,12 +3912,14 @@ impl Error for RegisterActivityTypeError { /// Errors returned by RegisterDomain #[derive(Debug, PartialEq)] pub enum RegisterDomainError { - ///

Returned if the specified domain already exists. You get this fault even if the existing domain is in deprecated status.

+ ///

Returned if the domain already exists. You may get this fault if you are registering a domain that is either already registered or deprecated, or if you undeprecate a domain that is currently registered.

DomainAlreadyExistsFault(String), ///

Returned by any operation if a system imposed limitation has been reached. To address this fault you should either clean up unused resources or increase the limit by contacting AWS.

LimitExceededFault(String), ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

OperationNotPermittedFault(String), + ///

You've exceeded the number of tags allowed for a domain.

+ TooManyTagsFault(String), } impl RegisterDomainError { @@ -3804,6 +3939,9 @@ impl RegisterDomainError { err.msg, )) } + "TooManyTagsFault" => { + return RusotoError::Service(RegisterDomainError::TooManyTagsFault(err.msg)) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3822,6 +3960,7 @@ impl Error for RegisterDomainError { RegisterDomainError::DomainAlreadyExistsFault(ref cause) => cause, RegisterDomainError::LimitExceededFault(ref cause) => cause, RegisterDomainError::OperationNotPermittedFault(ref cause) => cause, + RegisterDomainError::TooManyTagsFault(ref cause) => cause, } } } @@ -3832,7 +3971,7 @@ pub enum RegisterWorkflowTypeError { LimitExceededFault(String), ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

OperationNotPermittedFault(String), - ///

Returned if the type already exists in the specified domain. You get this fault even if the existing type is in deprecated status. You can specify another version if the intent is to create a new distinct version of the type.

+ ///

Returned if the type already exists in the specified domain. You may get this fault if you are registering a type that is either already registered or deprecated, or if you undeprecate a type that is currently registered.

TypeAlreadyExistsFault(String), ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

UnknownResourceFault(String), @@ -4225,6 +4364,59 @@ impl Error for StartWorkflowExecutionError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + ///

Returned by any operation if a system imposed limitation has been reached. To address this fault you should either clean up unused resources or increase the limit by contacting AWS.

+ LimitExceededFault(String), + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

You've exceeded the number of tags allowed for a domain.

+ TooManyTagsFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "LimitExceededFault" => { + return RusotoError::Service(TagResourceError::LimitExceededFault(err.msg)) + } + "OperationNotPermittedFault" => { + return RusotoError::Service(TagResourceError::OperationNotPermittedFault( + err.msg, + )) + } + "TooManyTagsFault" => { + return RusotoError::Service(TagResourceError::TooManyTagsFault(err.msg)) + } + "UnknownResourceFault" => { + return RusotoError::Service(TagResourceError::UnknownResourceFault(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::LimitExceededFault(ref cause) => cause, + TagResourceError::OperationNotPermittedFault(ref cause) => cause, + TagResourceError::TooManyTagsFault(ref cause) => cause, + TagResourceError::UnknownResourceFault(ref cause) => cause, + } + } +} /// Errors returned by TerminateWorkflowExecution #[derive(Debug, PartialEq)] pub enum TerminateWorkflowExecutionError { @@ -4270,187 +4462,417 @@ impl Error for TerminateWorkflowExecutionError { } } } +/// Errors returned by UndeprecateActivityType +#[derive(Debug, PartialEq)] +pub enum UndeprecateActivityTypeError { + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

Returned if the type already exists in the specified domain. You may get this fault if you are registering a type that is either already registered or deprecated, or if you undeprecate a type that is currently registered.

+ TypeAlreadyExistsFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl UndeprecateActivityTypeError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UndeprecateActivityTypeError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "OperationNotPermittedFault" => { + return RusotoError::Service( + UndeprecateActivityTypeError::OperationNotPermittedFault(err.msg), + ) + } + "TypeAlreadyExistsFault" => { + return RusotoError::Service( + UndeprecateActivityTypeError::TypeAlreadyExistsFault(err.msg), + ) + } + "UnknownResourceFault" => { + return RusotoError::Service( + UndeprecateActivityTypeError::UnknownResourceFault(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UndeprecateActivityTypeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UndeprecateActivityTypeError { + fn description(&self) -> &str { + match *self { + UndeprecateActivityTypeError::OperationNotPermittedFault(ref cause) => cause, + UndeprecateActivityTypeError::TypeAlreadyExistsFault(ref cause) => cause, + UndeprecateActivityTypeError::UnknownResourceFault(ref cause) => cause, + } + } +} +/// Errors returned by UndeprecateDomain +#[derive(Debug, PartialEq)] +pub enum UndeprecateDomainError { + ///

Returned if the domain already exists. You may get this fault if you are registering a domain that is either already registered or deprecated, or if you undeprecate a domain that is currently registered.

+ DomainAlreadyExistsFault(String), + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl UndeprecateDomainError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UndeprecateDomainError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "DomainAlreadyExistsFault" => { + return RusotoError::Service(UndeprecateDomainError::DomainAlreadyExistsFault( + err.msg, + )) + } + "OperationNotPermittedFault" => { + return RusotoError::Service( + UndeprecateDomainError::OperationNotPermittedFault(err.msg), + ) + } + "UnknownResourceFault" => { + return RusotoError::Service(UndeprecateDomainError::UnknownResourceFault( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UndeprecateDomainError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UndeprecateDomainError { + fn description(&self) -> &str { + match *self { + UndeprecateDomainError::DomainAlreadyExistsFault(ref cause) => cause, + UndeprecateDomainError::OperationNotPermittedFault(ref cause) => cause, + UndeprecateDomainError::UnknownResourceFault(ref cause) => cause, + } + } +} +/// Errors returned by UndeprecateWorkflowType +#[derive(Debug, PartialEq)] +pub enum UndeprecateWorkflowTypeError { + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

Returned if the type already exists in the specified domain. You may get this fault if you are registering a type that is either already registered or deprecated, or if you undeprecate a type that is currently registered.

+ TypeAlreadyExistsFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl UndeprecateWorkflowTypeError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UndeprecateWorkflowTypeError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "OperationNotPermittedFault" => { + return RusotoError::Service( + UndeprecateWorkflowTypeError::OperationNotPermittedFault(err.msg), + ) + } + "TypeAlreadyExistsFault" => { + return RusotoError::Service( + UndeprecateWorkflowTypeError::TypeAlreadyExistsFault(err.msg), + ) + } + "UnknownResourceFault" => { + return RusotoError::Service( + UndeprecateWorkflowTypeError::UnknownResourceFault(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UndeprecateWorkflowTypeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UndeprecateWorkflowTypeError { + fn description(&self) -> &str { + match *self { + UndeprecateWorkflowTypeError::OperationNotPermittedFault(ref cause) => cause, + UndeprecateWorkflowTypeError::TypeAlreadyExistsFault(ref cause) => cause, + UndeprecateWorkflowTypeError::UnknownResourceFault(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + ///

Returned by any operation if a system imposed limitation has been reached. To address this fault you should either clean up unused resources or increase the limit by contacting AWS.

+ LimitExceededFault(String), + ///

Returned when the caller doesn't have sufficient permissions to invoke the action.

+ OperationNotPermittedFault(String), + ///

Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

+ UnknownResourceFault(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "LimitExceededFault" => { + return RusotoError::Service(UntagResourceError::LimitExceededFault(err.msg)) + } + "OperationNotPermittedFault" => { + return RusotoError::Service(UntagResourceError::OperationNotPermittedFault( + err.msg, + )) + } + "UnknownResourceFault" => { + return RusotoError::Service(UntagResourceError::UnknownResourceFault(err.msg)) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::LimitExceededFault(ref cause) => cause, + UntagResourceError::OperationNotPermittedFault(ref cause) => cause, + UntagResourceError::UnknownResourceFault(ref cause) => cause, + } + } +} /// Trait representing the capabilities of the Amazon SWF API. Amazon SWF clients implement this trait. pub trait Swf { - ///

Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_closed_workflow_executions( &self, input: CountClosedWorkflowExecutionsInput, ) -> RusotoFuture<WorkflowExecutionCount, CountClosedWorkflowExecutionsError>; - ///
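As a concrete companion to the access-control notes above, the tagFilter.tag value constrained by the swf:tagFilter.tag condition key is simply the tag passed in the request; a sketch with hypothetical names:

use rusoto_core::Region;
use rusoto_swf::{CountClosedWorkflowExecutionsInput, Swf, SwfClient, TagFilter};

fn count_closed_by_tag() {
    let client = SwfClient::new(Region::UsEast1);
    let input = CountClosedWorkflowExecutionsInput {
        domain: "example-domain".to_string(),
        // This value is what an IAM Condition on swf:tagFilter.tag would constrain.
        tag_filter: Some(TagFilter {
            tag: "team:payments".to_string(),
        }),
        ..Default::default()
    };
    let _count = client.count_closed_workflow_executions(input).sync();
}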

Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_open_workflow_executions( &self, input: CountOpenWorkflowExecutionsInput, ) -> RusotoFuture<WorkflowExecutionCount, CountOpenWorkflowExecutionsError>; - ///

Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no activity task was ever scheduled in then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no activity task was ever scheduled in then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_pending_activity_tasks( &self, input: CountPendingActivityTasksInput, ) -> RusotoFuture; - ///

Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no decision task was ever scheduled in then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no decision task was ever scheduled in then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_pending_decision_tasks( &self, input: CountPendingDecisionTasksInput, ) -> RusotoFuture<PendingTaskCount, CountPendingDecisionTasksError>; - ///
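A sketch of the pending-count calls above; the domain and task list names are hypothetical, and a task list in which no task was ever scheduled returns 0:

use rusoto_core::Region;
use rusoto_swf::{CountPendingActivityTasksInput, Swf, SwfClient, TaskList};

fn pending_activity_count() {
    let client = SwfClient::new(Region::UsEast1);
    let input = CountPendingActivityTasksInput {
        domain: "example-domain".to_string(),
        task_list: TaskList {
            name: "orders-tasklist".to_string(),
        },
    };
    match client.count_pending_activity_tasks(input).sync() {
        // The count is an approximation and may be truncated.
        Ok(count) => println!("pending: {} (truncated: {:?})", count.count, count.truncated),
        Err(e) => eprintln!("count failed: {:?}", e),
    }
}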

Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_activity_type( &self, input: DeprecateActivityTypeInput, ) -> RusotoFuture<(), DeprecateActivityTypeError>; - ///

Deprecates the specified domain. After a domain has been deprecated it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Deprecates the specified domain. After a domain has been deprecated it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_domain( &self, input: DeprecateDomainInput, ) -> RusotoFuture<(), DeprecateDomainError>; - ///

Deprecates the specified workflow type. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated continue to run. A deprecated workflow type may still be used when calling visibility actions.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Deprecates the specified workflow type. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated continue to run. A deprecated workflow type may still be used when calling visibility actions.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_workflow_type( &self, input: DeprecateWorkflowTypeInput, ) -> RusotoFuture<(), DeprecateWorkflowTypeError>; - ///

Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_activity_type( &self, input: DescribeActivityTypeInput, ) -> RusotoFuture<ActivityTypeDetail, DescribeActivityTypeError>; - ///

Returns information about the specified domain, including description and status.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+ ///

Returns information about the specified domain, including description and status.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_domain( &self, input: DescribeDomainInput, ) -> RusotoFuture<DomainDetail, DescribeDomainError>; - ///

Returns information about the specified workflow execution including its type and some statistics.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_workflow_execution(
    &self,
    input: DescribeWorkflowExecutionInput,
) -> RusotoFuture<WorkflowExecutionDetail, DescribeWorkflowExecutionError>;

Returns information about the specified workflow type. This includes configuration settings specified when the type was registered and other information such as creation date, current status, etc.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_workflow_type(
    &self,
    input: DescribeWorkflowTypeInput,
) -> RusotoFuture<WorkflowTypeDetail, DescribeWorkflowTypeError>;

Returns the history of the specified workflow execution. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn get_workflow_execution_history(
    &self,
    input: GetWorkflowExecutionHistoryInput,
) -> RusotoFuture<History, GetWorkflowExecutionHistoryError>;
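
Since the history may span multiple pages, a caller typically loops until nextPageToken is absent. A sketch, assuming the generated input struct derives Default and WorkflowExecution is Clone:

use rusoto_swf::{GetWorkflowExecutionHistoryInput, HistoryEvent, Swf, SwfClient, WorkflowExecution};

// Drain every page of an execution's history by re-issuing the call
// with the nextPageToken from the previous response.
fn fetch_full_history(
    client: &SwfClient,
    domain: &str,
    execution: WorkflowExecution,
) -> Result<Vec<HistoryEvent>, Box<dyn std::error::Error>> {
    let mut events = Vec::new();
    let mut next_page_token = None;
    loop {
        let page = client
            .get_workflow_execution_history(GetWorkflowExecutionHistoryInput {
                domain: domain.to_string(),
                execution: execution.clone(),
                next_page_token: next_page_token.take(),
                ..Default::default()
            })
            .sync()?;
        events.extend(page.events);
        match page.next_page_token {
            Some(token) => next_page_token = Some(token),
            None => break,
        }
    }
    Ok(events)
}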

Returns information about all activities registered in the specified domain that match the specified name and registration status. The result includes information like creation date, current status of the activity, etc. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_activity_types(
    &self,
    input: ListActivityTypesInput,
) -> RusotoFuture<ActivityTypeInfos, ListActivityTypesError>;

Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_closed_workflow_executions(
    &self,
    input: ListClosedWorkflowExecutionsInput,
) -> RusotoFuture<WorkflowExecutionInfos, ListClosedWorkflowExecutionsError>;

Returns the list of domains registered in the account. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the account ID, with no dashes.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_domains(&self, input: ListDomainsInput) -> RusotoFuture<DomainInfos, ListDomainsError>;

Returns a list of open workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_open_workflow_executions(
    &self,
    input: ListOpenWorkflowExecutionsInput,
) -> RusotoFuture<WorkflowExecutionInfos, ListOpenWorkflowExecutionsError>;

Returns information about workflow types in the specified domain. The results may be split into multiple pages that can be retrieved by making the call repeatedly.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_workflow_types(
    &self,
    input: ListWorkflowTypesInput,
) -> RusotoFuture<WorkflowTypeInfos, ListWorkflowTypesError>;

List tags for a given domain.

fn list_tags_for_resource(
    &self,
    input: ListTagsForResourceInput,
) -> RusotoFuture<ListTagsForResourceOutput, ListTagsForResourceError>;

Used by workers to get an ActivityTask from the specified activity taskList. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns an empty result. An empty result, in this context, means that an ActivityTask is returned, but that the value of taskToken is an empty string. If a task is returned, the worker should use its type to identify and process it correctly.

Workers should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the maximum time the service may hold the poll request).

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn poll_for_activity_task(
    &self,
    input: PollForActivityTaskInput,
) -> RusotoFuture<ActivityTask, PollForActivityTaskError>;
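
A worker's long-poll loop might look like the following sketch; the domain, task list, and identity values are hypothetical, and it assumes taskToken is surfaced as a plain String on the returned ActivityTask:

use rusoto_swf::{PollForActivityTaskInput, Swf, SwfClient, TaskList};

// SWF holds the connection open for up to 60 seconds, so the HTTP
// client's socket timeout should be raised to at least 70 seconds.
fn worker_loop(client: &SwfClient) {
    loop {
        let polled = client
            .poll_for_activity_task(PollForActivityTaskInput {
                domain: "my-domain".to_string(),
                task_list: TaskList { name: "my-task-list".to_string() },
                identity: Some("worker-1".to_string()),
            })
            .sync();
        match polled {
            // An empty taskToken means the 60-second poll expired with no work.
            Ok(task) if task.task_token.is_empty() => continue,
            Ok(task) => {
                // Dispatch on the activity type and process the task here.
                println!("got task {}", task.task_token);
            }
            Err(e) => eprintln!("poll failed: {}", e),
        }
    }
}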

Used by deciders to get a DecisionTask from the specified decision taskList. A decision task may be returned for any open workflow execution that is using the specified task list. The task includes a paginated view of the history of the workflow execution. The decider should use the workflow type and the history to determine how to properly handle the task.

This action initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. If no decision task is available in the specified task list before the timeout of 60 seconds expires, an empty result is returned. An empty result, in this context, means that a DecisionTask is returned, but that the value of taskToken is an empty string.

Deciders should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the timeout).

Because the number of workflow history events for a single workflow execution might be very large, the result returned might be split up across a number of pages. To retrieve subsequent pages, make additional calls to PollForDecisionTask using the nextPageToken returned by the initial call. Note that you do not call GetWorkflowExecutionHistory with this nextPageToken. Instead, call PollForDecisionTask again.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn poll_for_decision_task(
    &self,
    input: PollForDecisionTaskInput,
) -> RusotoFuture<DecisionTask, PollForDecisionTaskError>;

Used by activity workers to report to the service that the ActivityTask represented by the specified taskToken is still making progress. The worker can also specify details of the progress, for example percent complete, using the details parameter. This action can also be used by the worker as a mechanism to check if cancellation is being requested for the activity task. If a cancellation is being attempted for the specified task, then the boolean cancelRequested flag returned by the service is set to true.

This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout is specified in RegisterActivityType.

This action doesn't in itself create an event in the workflow execution history. However, if the task times out, the workflow execution history contains an ActivityTaskTimedOut event that contains the information from the last heartbeat generated by the activity worker.

The taskStartToCloseTimeout of an activity type is the maximum duration of an activity task, regardless of the number of RecordActivityTaskHeartbeat requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType.

This operation is only useful for long-lived activities to report liveliness of the task and to determine if a cancellation is being attempted.

If the cancelRequested flag returns true, a cancellation is being attempted. If the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. Otherwise, it should ignore the cancellation request.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn record_activity_task_heartbeat(
    &self,
    input: RecordActivityTaskHeartbeatInput,
) -> RusotoFuture<ActivityTaskStatus, RecordActivityTaskHeartbeatError>;
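
A sketch of a heartbeat helper that honors cancellation, assuming the returned status struct exposes cancelRequested as a bool field:

use rusoto_swf::{
    RecordActivityTaskHeartbeatInput, RespondActivityTaskCanceledInput, Swf, SwfClient,
};

// Returns true when the caller should abandon the task because a
// cancellation was requested (and has already been acknowledged).
fn heartbeat(
    client: &SwfClient,
    task_token: &str,
    percent_done: u32,
) -> Result<bool, Box<dyn std::error::Error>> {
    let status = client
        .record_activity_task_heartbeat(RecordActivityTaskHeartbeatInput {
            task_token: task_token.to_string(),
            details: Some(format!("{}% complete", percent_done)),
        })
        .sync()?;
    if status.cancel_requested {
        client
            .respond_activity_task_canceled(RespondActivityTaskCanceledInput {
                task_token: task_token.to_string(),
                details: Some("canceled on request".to_string()),
            })
            .sync()?;
        return Ok(true);
    }
    Ok(false)
}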

Registers a new activity type along with its configuration settings in the specified domain.

A TypeAlreadyExists fault is returned if the type already exists in the domain. You cannot change any configuration settings of the type after its registration, and it must be registered as a new version.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.

    • name: String constraint. The key is swf:name.

    • version: String constraint. The key is swf:version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn register_activity_type(
    &self,
    input: RegisterActivityTypeInput,
) -> RusotoFuture<(), RegisterActivityTypeError>;

Registers a new domain.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • You cannot use an IAM policy to control domain access for this action. The name of the domain being registered is available as the resource of this action.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn register_domain(&self, input: RegisterDomainInput) -> RusotoFuture<(), RegisterDomainError>;
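
A registration sketch; the retention period is passed as a string of days, and the tags field added by this change is assumed to be an optional Vec of ResourceTag:

use rusoto_swf::{RegisterDomainInput, ResourceTag, Swf, SwfClient};

fn register(client: &SwfClient) -> Result<(), Box<dyn std::error::Error>> {
    client
        .register_domain(RegisterDomainInput {
            name: "my-domain".to_string(), // hypothetical
            description: Some("example domain".to_string()),
            // Duration in days for which the execution history is kept.
            workflow_execution_retention_period_in_days: "30".to_string(),
            tags: Some(vec![ResourceTag {
                key: "team".to_string(),
                value: Some("data".to_string()),
            }]),
        })
        .sync()?;
    Ok(())
}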

Registers a new workflow type and its configuration settings in the specified domain.

The retention period for the workflow history is set by the RegisterDomain action.

If the type already exists, then a TypeAlreadyExists fault is returned. You cannot change the configuration settings of a workflow type once it is registered and it must be registered as a new version.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.

    • name: String constraint. The key is swf:name.

    • version: String constraint. The key is swf:version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn register_workflow_type(
    &self,
    input: RegisterWorkflowTypeInput,
) -> RusotoFuture<(), RegisterWorkflowTypeError>;

Records a WorkflowExecutionCancelRequested event in the currently running workflow execution identified by the given domain, workflowId, and runId. This logically requests the cancellation of the workflow execution as a whole. It is up to the decider to take appropriate actions when it receives an execution history with this event.

If the runId isn't specified, the WorkflowExecutionCancelRequested event is recorded in the history of the current open workflow execution with the specified workflowId in the domain.

Because this action allows the workflow to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution when possible.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn request_cancel_workflow_execution(
    &self,
    input: RequestCancelWorkflowExecutionInput,
) -> RusotoFuture<(), RequestCancelWorkflowExecutionError>;

Used by workers to tell the service that the ActivityTask identified by the taskToken was successfully canceled. Additional details can be provided using the details argument.

These details (if provided) appear in the ActivityTaskCanceled event added to the workflow history.

Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat request returns true and if the activity can be safely undone or abandoned.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_activity_task_canceled(
    &self,
    input: RespondActivityTaskCanceledInput,
) -> RusotoFuture<(), RespondActivityTaskCanceledError>;

Used by workers to tell the service that the ActivityTask identified by the taskToken completed successfully with a result (if provided). The result appears in the ActivityTaskCompleted event in the workflow history.

If the requested task doesn't complete successfully, use RespondActivityTaskFailed instead. If the worker finds that the task is canceled through the canceled flag returned by RecordActivityTaskHeartbeat, it should cancel the task, clean up and then call RespondActivityTaskCanceled.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_activity_task_completed(
    &self,
    input: RespondActivityTaskCompletedInput,
) -> RusotoFuture<(), RespondActivityTaskCompletedError>;

Used by workers to tell the service that the ActivityTask identified by the taskToken has failed with reason (if specified). The reason and details appear in the ActivityTaskFailed event added to the workflow history.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_activity_task_failed(
    &self,
    input: RespondActivityTaskFailedInput,
) -> RusotoFuture<(), RespondActivityTaskFailedError>;

Used by deciders to tell the service that the DecisionTask identified by the taskToken has successfully completed. The decisions argument specifies the list of decisions made while processing the task.

A DecisionTaskCompleted event is added to the workflow history. The executionContext specified is attached to the event in the workflow execution history.

Access Control

If an IAM policy grants permission to use RespondDecisionTaskCompleted, it can express permissions for the list of decisions in the decisions parameter. Each of the decisions has one or more parameters, much like a regular API call. To allow for policies to be as readable as possible, you can express permissions on decisions as if they were actual API calls, including applying conditions to some parameters. For more information, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_decision_task_completed(
    &self,
    input: RespondDecisionTaskCompletedInput,
) -> RusotoFuture<(), RespondDecisionTaskCompletedError>;
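
A sketch of a decider closing a workflow with a single CompleteWorkflowExecution decision; it assumes the Decision struct derives Default so the unused attribute fields can be elided:

use rusoto_swf::{
    CompleteWorkflowExecutionDecisionAttributes, Decision, RespondDecisionTaskCompletedInput, Swf,
    SwfClient,
};

fn complete_workflow(
    client: &SwfClient,
    task_token: String,
) -> Result<(), Box<dyn std::error::Error>> {
    let decision = Decision {
        decision_type: "CompleteWorkflowExecution".to_string(),
        complete_workflow_execution_decision_attributes: Some(
            CompleteWorkflowExecutionDecisionAttributes {
                result: Some("done".to_string()),
            },
        ),
        ..Default::default()
    };
    client
        .respond_decision_task_completed(RespondDecisionTaskCompletedInput {
            task_token,
            decisions: Some(vec![decision]),
            execution_context: None,
        })
        .sync()?;
    Ok(())
}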

Records a WorkflowExecutionSignaled event in the workflow execution history and creates a decision task for the workflow execution identified by the given domain, workflowId and runId. The event is recorded with the specified user defined signalName and input (if provided).

If a runId isn't specified, then the WorkflowExecutionSignaled event is recorded in the history of the current open workflow with the matching workflowId in the domain.

If the specified workflow execution isn't open, this method fails with UnknownResource.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn signal_workflow_execution(
    &self,
    input: SignalWorkflowExecutionInput,
) -> RusotoFuture<(), SignalWorkflowExecutionError>;
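
A short sketch; omitting runId targets the current open execution with the given workflowId, and all names here are hypothetical:

use rusoto_swf::{SignalWorkflowExecutionInput, Swf, SwfClient};

fn signal(client: &SwfClient) -> Result<(), Box<dyn std::error::Error>> {
    client
        .signal_workflow_execution(SignalWorkflowExecutionInput {
            domain: "my-domain".to_string(),
            workflow_id: "order-12345".to_string(),
            run_id: None, // omit to signal the current open execution
            signal_name: "payment-received".to_string(),
            input: Some(r#"{"amount": 100}"#.to_string()),
        })
        .sync()?;
    Ok(())
}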

Starts an execution of the workflow type in the specified domain using the provided workflowId and input data.

This action returns the newly started workflow execution.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagList.member.0: The key is swf:tagList.member.0.

    • tagList.member.1: The key is swf:tagList.member.1.

    • tagList.member.2: The key is swf:tagList.member.2.

    • tagList.member.3: The key is swf:tagList.member.3.

    • tagList.member.4: The key is swf:tagList.member.4.

    • taskList: String constraint. The key is swf:taskList.name.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn start_workflow_execution(
    &self,
    input: StartWorkflowExecutionInput,
) -> RusotoFuture<Run, StartWorkflowExecutionError>;
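
A hedged sketch of starting an execution, showing the parameters that the condition keys above can constrain (the task list and the first five tagList members). The domain, workflow identifiers, and tag are placeholders.

use rusoto_core::Region;
use rusoto_swf::{StartWorkflowExecutionInput, Swf, SwfClient, TaskList, WorkflowType};

fn start_execution() {
    let client = SwfClient::new(Region::UsEast1);
    let input = StartWorkflowExecutionInput {
        domain: "my-domain".to_owned(),
        workflow_id: "order-12345".to_owned(),
        workflow_type: WorkflowType {
            name: "ProcessOrder".to_owned(),
            version: "1.0".to_owned(),
        },
        // task_list and tag_list are the values the swf:taskList.name and
        // swf:tagList.member.N condition keys evaluate against.
        task_list: Some(TaskList { name: "orders".to_owned() }),
        tag_list: Some(vec!["priority".to_owned()]),
        ..Default::default()
    };
    match client.start_workflow_execution(input).sync() {
        Ok(run) => println!("started runId {:?}", run.run_id),
        Err(e) => eprintln!("start failed: {}", e),
    }
}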

Add a tag to an Amazon SWF domain.

Amazon SWF supports a maximum of 50 tags per resource.

fn tag_resource(&self, input: TagResourceInput) -> RusotoFuture<(), TagResourceError>;
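
A short sketch of tagging a domain, assuming the generated TagResourceInput and ResourceTag field layout; the ARN and tag values are placeholders, and the 50-tag limit noted above still applies.

use rusoto_core::Region;
use rusoto_swf::{ResourceTag, Swf, SwfClient, TagResourceInput};

fn tag_domain() {
    let client = SwfClient::new(Region::UsEast1);
    let input = TagResourceInput {
        // SWF domain ARNs take the form
        // arn:aws:swf:<region>:<account-id>:/domain/<name>.
        resource_arn: "arn:aws:swf:us-east-1:123456789012:/domain/my-domain".to_owned(),
        tags: vec![ResourceTag {
            key: "team".to_owned(),
            value: Some("payments".to_owned()),
        }],
    };
    client.tag_resource(input).sync().expect("tag_resource failed");
}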

Records a WorkflowExecutionTerminated event and forces closure of the workflow execution identified by the given domain, runId, and workflowId. The child policy, registered with the workflow type or specified when starting this execution, is applied to any open child workflow executions of this workflow execution.

If the identified workflow execution was in progress, it is terminated immediately.

If a runId isn't specified, then the WorkflowExecutionTerminated event is recorded in the history of the current open workflow with the matching workflowId in the domain.

You should consider using the RequestCancelWorkflowExecution action instead, because it allows the workflow to close gracefully, while TerminateWorkflowExecution doesn't.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn terminate_workflow_execution(
    &self,
    input: TerminateWorkflowExecutionInput,
) -> RusotoFuture<(), TerminateWorkflowExecutionError>;
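
Since this action can only be scoped by Resource and Action, an IAM policy like the following sketch is how you restrict who may terminate executions. The account ID and domain name are placeholders, and the policy is embedded as a Rust constant purely for illustration.

const ALLOW_TERMINATE_ON_ONE_DOMAIN: &str = r#"{
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": "swf:TerminateWorkflowExecution",
        "Resource": "arn:aws:swf:us-east-1:123456789012:/domain/my-domain"
    }]
}"#;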

Undeprecates a previously deprecated activity type. After an activity type has been undeprecated, you can create new tasks of that activity type.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn undeprecate_activity_type(
    &self,
    input: UndeprecateActivityTypeInput,
) -> RusotoFuture<(), UndeprecateActivityTypeError>;

Undeprecates a previously deprecated domain. After a domain has been undeprecated, it can be used to create new workflow executions or register new types.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn undeprecate_domain(
    &self,
    input: UndeprecateDomainInput,
) -> RusotoFuture<(), UndeprecateDomainError>;

Undeprecates a previously deprecated workflow type. After a workflow type has been undeprecated, you can create new executions of that type.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn undeprecate_workflow_type(
    &self,
    input: UndeprecateWorkflowTypeInput,
) -> RusotoFuture<(), UndeprecateWorkflowTypeError>;
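
A sketch of reviving a deprecated workflow type; the domain and type identifiers are hypothetical. As with the other undeprecate calls, the effect is eventually consistent.

use rusoto_core::Region;
use rusoto_swf::{Swf, SwfClient, UndeprecateWorkflowTypeInput, WorkflowType};

fn revive_workflow_type() {
    let client = SwfClient::new(Region::UsEast1);
    let input = UndeprecateWorkflowTypeInput {
        domain: "my-domain".to_owned(),
        workflow_type: WorkflowType {
            name: "ProcessOrder".to_owned(),
            version: "1.0".to_owned(),
        },
    };
    // New executions of this type may not be accepted immediately after
    // the call returns, since undeprecation is eventually consistent.
    client.undeprecate_workflow_type(input).sync().expect("undeprecate failed");
}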

Remove a tag from an Amazon SWF domain.

fn untag_resource(&self, input: UntagResourceInput) -> RusotoFuture<(), UntagResourceError>;
}

/// A client for the Amazon SWF API.
#[derive(Clone)]
@@ -4464,10 +4886,7 @@ impl SwfClient {
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> SwfClient {
        Self::new_with_client(Client::shared(), region)
    }

    pub fn new_with<P, D>(
@@ -4481,15 +4900,19 @@ impl SwfClient {
        D: DispatchSignedRequest + Send + Sync + 'static,
        D::Future: Send,
    {
        Self::new_with_client(
            Client::new_with(credentials_provider, request_dispatcher),
            region,
        )
    }

    pub fn new_with_client(client: Client, region: region::Region) -> SwfClient {
        SwfClient { client, region }
    }
}

impl Swf for SwfClient {
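
The new new_with_client constructor lets several service clients share one rusoto_core::Client, and with it one connection pool. A minimal sketch:

use rusoto_core::{Client, Region};
use rusoto_swf::SwfClient;

fn shared_client() -> SwfClient {
    // Client::shared() returns the process-wide default client; handing it
    // to new_with_client avoids building a separate dispatcher per service.
    let client = Client::shared();
    SwfClient::new_with_client(client, Region::UsWest2)
}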

Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_closed_workflow_executions(
    &self,
    input: CountClosedWorkflowExecutionsInput,
@@ -4518,7 +4941,7 @@ impl Swf for SwfClient {
        })
    }
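
A sketch of counting closed executions by tag, tying back to the swf:tagFilter.tag condition key above. The domain and tag are placeholders, and the field names assume the generated input structs; note that the API requires exactly one time filter.

use rusoto_core::Region;
use rusoto_swf::{
    CountClosedWorkflowExecutionsInput, ExecutionTimeFilter, Swf, SwfClient, TagFilter,
};

fn count_closed_by_tag() {
    let client = SwfClient::new(Region::UsEast1);
    let input = CountClosedWorkflowExecutionsInput {
        domain: "my-domain".to_owned(),
        // One time filter is required; oldest_date is an epoch timestamp,
        // so 0.0 means "since the beginning".
        start_time_filter: Some(ExecutionTimeFilter {
            oldest_date: 0.0,
            latest_date: None,
        }),
        // tag_filter.tag is the value the swf:tagFilter.tag key matches.
        tag_filter: Some(TagFilter { tag: "priority".to_owned() }),
        ..Default::default()
    };
    let counts = client
        .count_closed_workflow_executions(input)
        .sync()
        .expect("count failed");
    println!("{} closed executions (truncated: {:?})", counts.count, counts.truncated);
}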

Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_open_workflow_executions(
    &self,
    input: CountOpenWorkflowExecutionsInput,
@@ -4547,7 +4970,7 @@ impl Swf for SwfClient {
        })
    }

Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list in which no activity task was ever scheduled, then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_pending_activity_tasks(
    &self,
    input: CountPendingActivityTasksInput,
@@ -4576,7 +4999,7 @@ impl Swf for SwfClient {
        })
    }

Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list in which no decision task was ever scheduled, then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn count_pending_decision_tasks(
    &self,
    input: CountPendingDecisionTasksInput,
@@ -4605,7 +5028,7 @@ impl Swf for SwfClient {
        })
    }

Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_activity_type(
    &self,
    input: DeprecateActivityTypeInput,
@@ -4633,7 +5056,7 @@ impl Swf for SwfClient {
        })
    }

Deprecates the specified domain. After a domain has been deprecated, it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_domain(
    &self,
    input: DeprecateDomainInput,
@@ -4659,7 +5082,7 @@ impl Swf for SwfClient {
        })
    }

Deprecates the specified workflow type. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated continue to run. A deprecated workflow type may still be used when calling visibility actions.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn deprecate_workflow_type(
    &self,
    input: DeprecateWorkflowTypeInput,
@@ -4687,7 +5110,7 @@ impl Swf for SwfClient {
        })
    }

Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_activity_type(
    &self,
    input: DescribeActivityTypeInput,
@@ -4715,7 +5138,7 @@ impl Swf for SwfClient {
        })
    }

Returns information about the specified domain, including description and status.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_domain(
    &self,
    input: DescribeDomainInput,
@@ -4743,7 +5166,7 @@ impl Swf for SwfClient {
        })
    }

Returns information about the specified workflow execution including its type and some statistics.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_workflow_execution(
    &self,
    input: DescribeWorkflowExecutionInput,
@@ -4772,7 +5195,7 @@ impl Swf for SwfClient {
        })
    }

Returns information about the specified workflow type. This includes configuration settings specified when the type was registered and other information such as creation date, current status, etc.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn describe_workflow_type(
    &self,
    input: DescribeWorkflowTypeInput,
@@ -4800,7 +5223,7 @@ impl Swf for SwfClient {
        })
    }

Returns the history of the specified workflow execution. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn get_workflow_execution_history(
    &self,
    input: GetWorkflowExecutionHistoryInput,
@@ -4828,7 +5251,7 @@ impl Swf for SwfClient {
        })
    }

Returns information about all activities registered in the specified domain that match the specified name and registration status. The result includes information like creation date, current status of the activity, etc. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_activity_types(
    &self,
    input: ListActivityTypesInput,
@@ -4857,7 +5280,7 @@ impl Swf for SwfClient {
        })
    }

Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_closed_workflow_executions(
    &self,
    input: ListClosedWorkflowExecutionsInput,
@@ -4886,7 +5309,7 @@ impl Swf for SwfClient {
        })
    }

Returns the list of domains registered in the account. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the account ID, with no dashes.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_domains(&self, input: ListDomainsInput) -> RusotoFuture<DomainInfos, ListDomainsError> {
    let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
@@ -4911,7 +5334,7 @@ impl Swf for SwfClient {
        })
    }
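
A sketch of draining the paginated result by following the nextPageToken convention described above; the registration status is one of the strings the API accepts, and the field names assume the generated structs.

use rusoto_core::Region;
use rusoto_swf::{ListDomainsInput, Swf, SwfClient};

fn print_all_domains() {
    let client = SwfClient::new(Region::UsEast1);
    let mut next_page_token: Option<String> = None;
    loop {
        let input = ListDomainsInput {
            registration_status: "REGISTERED".to_owned(),
            next_page_token: next_page_token.take(),
            ..Default::default()
        };
        let page = client.list_domains(input).sync().expect("list_domains failed");
        for domain in &page.domain_infos {
            println!("{}", domain.name);
        }
        match page.next_page_token {
            Some(token) => next_page_token = Some(token),
            None => break,
        }
    }
}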

Returns a list of open workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.

    • typeFilter.name: String constraint. The key is swf:typeFilter.name.

    • typeFilter.version: String constraint. The key is swf:typeFilter.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_open_workflow_executions(
    &self,
    input: ListOpenWorkflowExecutionsInput,
@@ -4940,7 +5363,35 @@ impl Swf for SwfClient {
        })
    }

List tags for a given domain.

fn list_tags_for_resource(
    &self,
    input: ListTagsForResourceInput,
) -> RusotoFuture<ListTagsForResourceOutput, ListTagsForResourceError> {
    let mut request = SignedRequest::new("POST", "swf", &self.region, "/");

    request.set_content_type("application/x-amz-json-1.0".to_owned());
    request.add_header("x-amz-target", "SimpleWorkflowService.ListTagsForResource");
    let encoded = serde_json::to_string(&input).unwrap();
    request.set_payload(Some(encoded));

    self.client.sign_and_dispatch(request, |response| {
        if response.status.is_success() {
            Box::new(response.buffer().from_err().and_then(|response| {
                proto::json::ResponsePayload::new(&response)
                    .deserialize::<ListTagsForResourceOutput, _>()
            }))
        } else {
            Box::new(
                response.buffer().from_err().and_then(|response| {
                    Err(ListTagsForResourceError::from_response(response))
                }),
            )
        }
    })
}
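
A usage sketch for the new method; the domain ARN is a placeholder.

use rusoto_core::Region;
use rusoto_swf::{ListTagsForResourceInput, Swf, SwfClient};

fn print_domain_tags() {
    let client = SwfClient::new(Region::UsEast1);
    let input = ListTagsForResourceInput {
        resource_arn: "arn:aws:swf:us-east-1:123456789012:/domain/my-domain".to_owned(),
    };
    let output = client
        .list_tags_for_resource(input)
        .sync()
        .expect("list_tags_for_resource failed");
    for tag in output.tags.unwrap_or_default() {
        println!("{} = {:?}", tag.key, tag.value);
    }
}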

Returns information about workflow types in the specified domain. The results may be split into multiple pages that can be retrieved by making the call repeatedly.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn list_workflow_types(
    &self,
    input: ListWorkflowTypesInput,
@@ -4969,7 +5420,7 @@ impl Swf for SwfClient {
        })
    }

Used by workers to get an ActivityTask from the specified activity taskList. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns an empty result. An empty result, in this context, means that an ActivityTask is returned, but that the value of taskToken is an empty string. If a task is returned, the worker should use its type to identify and process it correctly.

Workers should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the maximum time the service may hold the poll request).

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn poll_for_activity_task(
    &self,
    input: PollForActivityTaskInput,
@@ -4996,7 +5447,7 @@ impl Swf for SwfClient {
        })
    }
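
A sketch of an activity worker loop built on this long poll. Per the semantics above, it treats an empty taskToken as "no task" and polls again. The domain, task list, and worker identity are hypothetical, and the 70-second socket timeout must be configured on the underlying HTTP client.

use rusoto_core::Region;
use rusoto_swf::{PollForActivityTaskInput, Swf, SwfClient, TaskList};

fn activity_worker_loop() {
    let client = SwfClient::new(Region::UsEast1);
    loop {
        let input = PollForActivityTaskInput {
            domain: "my-domain".to_owned(),
            task_list: TaskList { name: "orders".to_owned() },
            identity: Some("worker-1".to_owned()),
        };
        match client.poll_for_activity_task(input).sync() {
            // An "empty" result is a task whose taskToken is the empty
            // string; there is nothing to do but poll again.
            Ok(task) if task.task_token.is_empty() => continue,
            Ok(task) => {
                // Process the task here, then report the outcome with one
                // of the RespondActivityTask* calls.
                println!("received task {}", task.task_token);
            }
            Err(e) => eprintln!("poll failed: {}", e),
        }
    }
}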

Used by deciders to get a DecisionTask from the specified decision taskList. A decision task may be returned for any open workflow execution that is using the specified task list. The task includes a paginated view of the history of the workflow execution. The decider should use the workflow type and the history to determine how to properly handle the task.

This action initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. If no decision task is available in the specified task list before the timeout of 60 seconds expires, an empty result is returned. An empty result, in this context, means that a DecisionTask is returned, but that the value of taskToken is an empty string.

Deciders should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the timeout).

Because the number of workflow history events for a single workflow execution might be very large, the result returned might be split up across a number of pages. To retrieve subsequent pages, make additional calls to PollForDecisionTask using the nextPageToken returned by the initial call. Note that you do not call GetWorkflowExecutionHistory with this nextPageToken. Instead, call PollForDecisionTask again.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn poll_for_decision_task(
    &self,
    input: PollForDecisionTaskInput,
@@ -5023,7 +5474,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Used by activity workers to report to the service that the ActivityTask represented by the specified taskToken is still making progress. The worker can also specify details of the progress, for example percent complete, using the details parameter. This action can also be used by the worker as a mechanism to check if cancellation is being requested for the activity task. If a cancellation is being attempted for the specified task, then the boolean cancelRequested flag returned by the service is set to true.

This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout is specified in RegisterActivityType.

This action doesn't in itself create an event in the workflow execution history. However, if the task times out, the workflow execution history contains an ActivityTaskTimedOut event that contains the information from the last heartbeat generated by the activity worker.

The taskStartToCloseTimeout of an activity type is the maximum duration of an activity task, regardless of the number of RecordActivityTaskHeartbeat requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType.

This operation is only useful for long-lived activities to report liveliness of the task and to determine if a cancellation is being attempted.

If the cancelRequested flag returns true, a cancellation is being attempted. If the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. Otherwise, it should ignore the cancellation request.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
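A sketch of a heartbeat helper, assuming `RecordActivityTaskHeartbeatInput { task_token, details }` and an output carrying a `cancel_requested` flag, per the SWF API shape:

```rust
use rusoto_swf::{RecordActivityTaskHeartbeatInput, Swf, SwfClient};

/// Reports progress and returns whether cancellation has been requested.
fn heartbeat(
    client: &SwfClient,
    task_token: &str,
    percent: u8,
) -> Result<bool, Box<dyn std::error::Error>> {
    let status = client
        .record_activity_task_heartbeat(RecordActivityTaskHeartbeatInput {
            task_token: task_token.to_owned(),
            details: Some(format!("{}% complete", percent)),
        })
        .sync()?;
    // When this is true, the worker should stop and, if the work can be
    // safely undone, call RespondActivityTaskCanceled.
    Ok(status.cancel_requested)
}
```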

fn record_activity_task_heartbeat(
    &self,
    input: RecordActivityTaskHeartbeatInput,
@@ -5052,7 +5503,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Registers a new activity type along with its configuration settings in the specified domain.

A TypeAlreadyExists fault is returned if the type already exists in the domain. You cannot change any configuration settings of the type after its registration, and it must be registered as a new version.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.

    • name: String constraint. The key is swf:name.

    • version: String constraint. The key is swf:version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn register_activity_type(
    &self,
    input: RegisterActivityTypeInput,
@@ -5077,7 +5528,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Registers a new domain.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • You cannot use an IAM policy to control domain access for this action. The name of the domain being registered is available as the resource of this action.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
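A registration sketch, assuming `RegisterDomainInput`'s `name` and `workflow_execution_retention_period_in_days` fields (the latter sets the history retention mentioned under RegisterWorkflowType below):

```rust
use rusoto_core::Region;
use rusoto_swf::{RegisterDomainInput, Swf, SwfClient};

fn register_domain(name: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    client
        .register_domain(RegisterDomainInput {
            name: name.to_owned(),
            // How long workflow execution history is kept after executions close.
            workflow_execution_retention_period_in_days: "30".to_owned(),
            ..Default::default()
        })
        .sync()?;
    Ok(())
}
```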

fn register_domain(&self, input: RegisterDomainInput) -> RusotoFuture<(), RegisterDomainError> {
    let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
@@ -5100,7 +5551,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Registers a new workflow type and its configuration settings in the specified domain.

The retention period for the workflow history is set by the RegisterDomain action.

If the type already exists, then a TypeAlreadyExists fault is returned. You cannot change the configuration settings of a workflow type once it is registered and it must be registered as a new version.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.

    • name: String constraint. The key is swf:name.

    • version: String constraint. The key is swf:version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn register_workflow_type(
    &self,
    input: RegisterWorkflowTypeInput,
@@ -5125,7 +5576,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Records a WorkflowExecutionCancelRequested event in the currently running workflow execution identified by the given domain, workflowId, and runId. This logically requests the cancellation of the workflow execution as a whole. It is up to the decider to take appropriate actions when it receives an execution history with this event.

If the runId isn't specified, the WorkflowExecutionCancelRequested event is recorded in the history of the current open workflow execution with the specified workflowId in the domain.

Because this action allows the workflow to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution when possible.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
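A sketch of requesting a graceful cancellation rather than a hard terminate, assuming `domain`, `workflow_id`, and an optional `run_id` on the input:

```rust
use rusoto_core::Region;
use rusoto_swf::{RequestCancelWorkflowExecutionInput, Swf, SwfClient};

fn request_cancel(domain: &str, workflow_id: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    client
        .request_cancel_workflow_execution(RequestCancelWorkflowExecutionInput {
            domain: domain.to_owned(),
            workflow_id: workflow_id.to_owned(),
            // With no runId, the currently open execution with this
            // workflowId receives the WorkflowExecutionCancelRequested event.
            run_id: None,
        })
        .sync()?;
    Ok(())
}
```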

fn request_cancel_workflow_execution(
    &self,
    input: RequestCancelWorkflowExecutionInput,
@@ -5151,7 +5602,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Used by workers to tell the service that the ActivityTask identified by the taskToken was successfully canceled. Additional details can be provided using the details argument.

These details (if provided) appear in the ActivityTaskCanceled event added to the workflow history.

Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat request returns true and if the activity can be safely undone or abandoned.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_activity_task_canceled(
    &self,
    input: RespondActivityTaskCanceledInput,
@@ -5177,7 +5628,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Used by workers to tell the service that the ActivityTask identified by the taskToken completed successfully with a result (if provided). The result appears in the ActivityTaskCompleted event in the workflow history.

If the requested task doesn't complete successfully, use RespondActivityTaskFailed instead. If the worker finds that the task is canceled through the canceled flag returned by RecordActivityTaskHeartbeat, it should cancel the task, clean up and then call RespondActivityTaskCanceled.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
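Closing out an activity either way, as a sketch that assumes the `task_token`/`result`/`reason`/`details` input fields:

```rust
use rusoto_swf::{
    RespondActivityTaskCompletedInput, RespondActivityTaskFailedInput, Swf, SwfClient,
};

fn finish_task(
    client: &SwfClient,
    task_token: &str,
    outcome: Result<String, String>,
) -> Result<(), Box<dyn std::error::Error>> {
    match outcome {
        // The result string shows up in the ActivityTaskCompleted event.
        Ok(result) => client
            .respond_activity_task_completed(RespondActivityTaskCompletedInput {
                task_token: task_token.to_owned(),
                result: Some(result),
            })
            .sync()?,
        // The reason and details show up in the ActivityTaskFailed event.
        Err(reason) => client
            .respond_activity_task_failed(RespondActivityTaskFailedInput {
                task_token: task_token.to_owned(),
                reason: Some(reason),
                details: None,
            })
            .sync()?,
    }
    Ok(())
}
```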

fn respond_activity_task_completed(
    &self,
    input: RespondActivityTaskCompletedInput,
@@ -5203,7 +5654,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Used by workers to tell the service that the ActivityTask identified by the taskToken has failed with reason (if specified). The reason and details appear in the ActivityTaskFailed event added to the workflow history.

A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn respond_activity_task_failed(
    &self,
    input: RespondActivityTaskFailedInput,
@@ -5229,7 +5680,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Used by deciders to tell the service that the DecisionTask identified by the taskToken has successfully completed. The decisions argument specifies the list of decisions made while processing the task.

A DecisionTaskCompleted event is added to the workflow history. The executionContext specified is attached to the event in the workflow execution history.

Access Control

If an IAM policy grants permission to use RespondDecisionTaskCompleted, it can express permissions for the list of decisions in the decisions parameter. Each of the decisions has one or more parameters, much like a regular API call. To allow for policies to be as readable as possible, you can express permissions on decisions as if they were actual API calls, including applying conditions to some parameters. For more information, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
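A decider responding with a single decision, sketched under the assumption that the generated `Decision` struct pairs a `decision_type` string with per-type attribute structs such as `CompleteWorkflowExecutionDecisionAttributes`:

```rust
use rusoto_swf::{
    CompleteWorkflowExecutionDecisionAttributes, Decision, RespondDecisionTaskCompletedInput,
    Swf, SwfClient,
};

fn complete_workflow(
    client: &SwfClient,
    task_token: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let decision = Decision {
        decision_type: "CompleteWorkflowExecution".to_owned(),
        complete_workflow_execution_decision_attributes: Some(
            CompleteWorkflowExecutionDecisionAttributes {
                result: Some("done".to_owned()),
            },
        ),
        ..Default::default()
    };
    client
        .respond_decision_task_completed(RespondDecisionTaskCompletedInput {
            task_token: task_token.to_owned(),
            decisions: Some(vec![decision]),
            // Attached to the DecisionTaskCompleted event if set.
            execution_context: None,
        })
        .sync()?;
    Ok(())
}
```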

fn respond_decision_task_completed(
    &self,
    input: RespondDecisionTaskCompletedInput,
@@ -5255,7 +5706,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Records a WorkflowExecutionSignaled event in the workflow execution history and creates a decision task for the workflow execution identified by the given domain, workflowId and runId. The event is recorded with the specified user defined signalName and input (if provided).

If a runId isn't specified, then the WorkflowExecutionSignaled event is recorded in the history of the current open workflow with the matching workflowId in the domain.

If the specified workflow execution isn't open, this method fails with UnknownResource.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn signal_workflow_execution(
    &self,
    input: SignalWorkflowExecutionInput,
@@ -5281,7 +5732,7 @@ impl Swf for SwfClient {
    })
}
-    ///

Starts an execution of the workflow type in the specified domain using the provided workflowId and input data.

This action returns the newly started workflow execution.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • tagList.member.0: The key is swf:tagList.member.0.

    • tagList.member.1: The key is swf:tagList.member.1.

    • tagList.member.2: The key is swf:tagList.member.2.

    • tagList.member.3: The key is swf:tagList.member.3.

    • tagList.member.4: The key is swf:tagList.member.4.

    • taskList: String constraint. The key is swf:taskList.name.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
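Starting an execution, as a sketch assuming the usual input fields (`domain`, `workflow_id`, `workflow_type`, `input`, `tag_list`):

```rust
use rusoto_core::Region;
use rusoto_swf::{StartWorkflowExecutionInput, Swf, SwfClient, WorkflowType};

fn start(domain: &str, workflow_id: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    let run = client
        .start_workflow_execution(StartWorkflowExecutionInput {
            domain: domain.to_owned(),
            workflow_id: workflow_id.to_owned(),
            workflow_type: WorkflowType {
                name: "order-processing".to_owned(),
                version: "1.0".to_owned(),
            },
            input: Some(r#"{"orderId": 42}"#.to_owned()),
            // At most five tags; each is addressable in IAM policies as
            // swf:tagList.member.0 through swf:tagList.member.4.
            tag_list: Some(vec!["billing".to_owned()]),
            ..Default::default()
        })
        .sync()?;
    println!("started run {:?}", run.run_id);
    Ok(())
}
```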

fn start_workflow_execution(
    &self,
    input: StartWorkflowExecutionInput,
@@ -5311,7 +5762,30 @@ impl Swf for SwfClient {
    })
}
-    ///

+ ///

Add a tag to an Amazon SWF domain.

Amazon SWF supports a maximum of 50 tags per resource.
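A tagging sketch, assuming `TagResourceInput` takes the domain's ARN plus a list of `ResourceTag { key, value }` pairs (shapes inferred from the SWF API, not confirmed here):

```rust
use rusoto_core::Region;
use rusoto_swf::{ResourceTag, Swf, SwfClient, TagResourceInput};

fn tag_domain(domain_arn: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    client
        .tag_resource(TagResourceInput {
            resource_arn: domain_arn.to_owned(),
            tags: vec![ResourceTag {
                key: "team".to_owned(),
                value: Some("payments".to_owned()),
            }],
        })
        .sync()?; // fails once the 50-tags-per-resource limit would be exceeded
    Ok(())
}
```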

+    fn tag_resource(&self, input: TagResourceInput) -> RusotoFuture<(), TagResourceError> {
+        let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header("x-amz-target", "SimpleWorkflowService.TagResource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Records a WorkflowExecutionTerminated event and forces closure of the workflow execution identified by the given domain, runId, and workflowId. The child policy, registered with the workflow type or specified when starting this execution, is applied to any open child workflow executions of this workflow execution.

If the identified workflow execution was in progress, it is terminated immediately.

If a runId isn't specified, then the WorkflowExecutionTerminated event is recorded in the history of the current open workflow with the matching workflowId in the domain.

You should consider using the RequestCancelWorkflowExecution action instead because it allows the workflow to gracefully close while TerminateWorkflowExecution doesn't.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

fn terminate_workflow_execution(
    &self,
    input: TerminateWorkflowExecutionInput,
@@ -5336,4 +5810,105 @@ impl Swf for SwfClient {
        }
    })
}
+
+    ///

Undeprecates a previously deprecated activity type. After an activity type has been undeprecated, you can create new tasks of that activity type.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.
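An undeprecation sketch; the same pattern applies to UndeprecateDomain and UndeprecateWorkflowType below (input field names assumed from the SWF API):

```rust
use rusoto_core::Region;
use rusoto_swf::{ActivityType, Swf, SwfClient, UndeprecateActivityTypeInput};

fn undeprecate(domain: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = SwfClient::new(Region::UsEast1);
    client
        .undeprecate_activity_type(UndeprecateActivityTypeInput {
            domain: domain.to_owned(),
            activity_type: ActivityType {
                name: "resize-image".to_owned(),
                version: "1.0".to_owned(),
            },
        })
        .sync()?;
    // Eventually consistent: new tasks of this type may not be schedulable
    // immediately after this call returns.
    Ok(())
}
```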

+    fn undeprecate_activity_type(
+        &self,
+        input: UndeprecateActivityTypeInput,
+    ) -> RusotoFuture<(), UndeprecateActivityTypeError> {
+        let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header(
+            "x-amz-target",
+            "SimpleWorkflowService.UndeprecateActivityType",
+        );
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(UndeprecateActivityTypeError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Undeprecates a previously deprecated domain. After a domain has been undeprecated it can be used to create new workflow executions or register new types.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • You cannot use an IAM policy to constrain this action's parameters.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+    fn undeprecate_domain(
+        &self,
+        input: UndeprecateDomainInput,
+    ) -> RusotoFuture<(), UndeprecateDomainError> {
+        let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header("x-amz-target", "SimpleWorkflowService.UndeprecateDomain");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UndeprecateDomainError::from_response(response))),
+                )
+            }
+        })
+    }
+
+    ///

Undeprecates a previously deprecated workflow type. After a workflow type has been undeprecated, you can create new executions of that type.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

+    fn undeprecate_workflow_type(
+        &self,
+        input: UndeprecateWorkflowTypeInput,
+    ) -> RusotoFuture<(), UndeprecateWorkflowTypeError> {
+        let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header(
+            "x-amz-target",
+            "SimpleWorkflowService.UndeprecateWorkflowType",
+        );
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(UndeprecateWorkflowTypeError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Remove a tag from an Amazon SWF domain.

+    fn untag_resource(&self, input: UntagResourceInput) -> RusotoFuture<(), UntagResourceError> {
+        let mut request = SignedRequest::new("POST", "swf", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.0".to_owned());
+        request.add_header("x-amz-target", "SimpleWorkflowService.UntagResource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }
 }
diff --git a/rusoto/services/swf/src/lib.rs b/rusoto/services/swf/src/lib.rs
index 0705d47c7dc..2b81d33cad0 100644
--- a/rusoto/services/swf/src/lib.rs
+++ b/rusoto/services/swf/src/lib.rs
@@ -12,7 +12,7 @@
 // =================================================================
 #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
-//!

Amazon Simple Workflow Service

The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications that use Amazon's cloud to coordinate work across distributed components. In Amazon SWF, a task represents a logical unit of work that is performed by a component of your workflow. Coordinating tasks in a workflow involves managing intertask dependencies, scheduling, and concurrency in accordance with the logical flow of the application.

Amazon SWF gives you full control over implementing tasks and coordinating them without worrying about underlying complexities such as tracking their progress and maintaining their state.

This documentation serves as reference only. For a broader overview of the Amazon SWF programming model, see the Amazon SWF Developer Guide.

//!
//! If you're using the service, you're probably looking for [SwfClient](struct.SwfClient.html) and [Swf](trait.Swf.html).
diff --git a/rusoto/services/textract/Cargo.toml b/rusoto/services/textract/Cargo.toml
new file mode 100644
index 00000000000..27419d3e2ec
--- /dev/null
+++ b/rusoto/services/textract/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "]
+description = "AWS SDK for Rust - Amazon Textract @ 2018-06-27"
+documentation = "https://docs.rs/rusoto_textract"
+keywords = ["AWS", "Amazon", "textract"]
+license = "MIT"
+name = "rusoto_textract"
+readme = "README.md"
+repository = "https://github.com/rusoto/rusoto"
+version = "0.41.0"
+homepage = "https://www.rusoto.org/"
+edition = "2018"
+exclude = ["test_resources/*"]
+
+[build-dependencies]
+
+[dependencies]
+bytes = "0.4.12"
+futures = "0.1.16"
+serde = "1.0.2"
+serde_derive = "1.0.2"
+serde_json = "1.0.1"
+
+[dependencies.rusoto_core]
+version = "0.41.0"
+path = "../../core"
+default-features = false
+[dev-dependencies.rusoto_mock]
+version = "0.41.0"
+path = "../../../mock"
+default-features = false
+
+[features]
+default = ["native-tls"]
+native-tls = ["rusoto_core/native-tls"]
+rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/textract/README.md b/rusoto/services/textract/README.md
new file mode 100644
index 00000000000..ca987206cfd
--- /dev/null
+++ b/rusoto/services/textract/README.md
@@ -0,0 +1,52 @@
+
+# Rusoto Textract
+Rust SDK for Amazon Textract
+
+You may be looking for:
+
+* [An overview of Rusoto][rusoto-overview]
+* [AWS services supported by Rusoto][supported-aws-services]
+* [API documentation][api-documentation]
+* [Getting help with Rusoto][rusoto-help]
+
+## Requirements
+
+Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older
+versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project
+[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml).
+
+On Linux, OpenSSL is required.
+
+## Installation
+
+To use `rusoto_textract` in your application, add it as a dependency in your `Cargo.toml`:
+
+```toml
+[dependencies]
+rusoto_textract = "0.41.0"
+```
+
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
+## Contributing
+
+See [CONTRIBUTING][contributing].
+
+## License
+
+Rusoto is distributed under the terms of the MIT license.
+
+See [LICENSE][license] for details.
+
+[api-documentation]: https://docs.rs/rusoto_textract "API documentation"
+[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License"
+[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide"
+[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto"
+[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview"
+[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto"
diff --git a/rusoto/services/textract/src/custom/mod.rs b/rusoto/services/textract/src/custom/mod.rs
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/rusoto/services/textract/src/generated.rs b/rusoto/services/textract/src/generated.rs
new file mode 100644
index 00000000000..d769f0ef8f9
--- /dev/null
+++ b/rusoto/services/textract/src/generated.rs
@@ -0,0 +1,1194 @@
+// =================================================================
+//
+//                            * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+#![allow(warnings)]
+
+use futures::future;
+use futures::Future;
+use rusoto_core::credential::ProvideAwsCredentials;
+use rusoto_core::region;
+use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
+use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
+
+use rusoto_core::proto;
+use rusoto_core::signature::SignedRequest;
+use serde_json;
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct AnalyzeDocumentRequest {
+    ///

The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Textract operations, you can't pass image bytes. The document must be an image in JPG or PNG format.

If you are using an AWS SDK to call Amazon Textract, you might not need to base64-encode image bytes passed using the Bytes field.

+ #[serde(rename = "Document")] + pub document: Document, + ///

A list of the types of analysis to perform. Add TABLES to the list to return information about the tables detected in the input document. Add FORMS to return detected fields and the associated text. To perform both types of analysis, add TABLES and FORMS to FeatureTypes.

+ #[serde(rename = "FeatureTypes")] + pub feature_types: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct AnalyzeDocumentResponse { + ///

The text that's detected and analyzed by AnalyzeDocument.

+ #[serde(rename = "Blocks")] + #[serde(skip_serializing_if = "Option::is_none")] + pub blocks: Option>, + ///

Metadata about the analyzed document. An example is the number of pages.

+ #[serde(rename = "DocumentMetadata")] + #[serde(skip_serializing_if = "Option::is_none")] + pub document_metadata: Option, +} + +///

A Block represents items that are recognized in a document within a group of pixels close to each other. The information returned in a Block depends on the type of operation. In document-text detection (for example DetectDocumentText), you get information about the detected words and lines of text. In text analysis (for example AnalyzeDocument), you can also get information about the fields, tables and selection elements that are detected in the document.

An array of Block objects is returned by both synchronous and asynchronous operations. In synchronous operations, such as DetectDocumentText, the array of Block objects is the entire set of results. In asynchronous operations, such as GetDocumentAnalysis, the array is returned over one or more responses.

For more information, see How Amazon Textract Works.
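Since relationships between blocks are expressed as lists of IDs, a common pattern is to index the returned blocks before walking them; a sketch, assuming a `Relationship { ids, .. }` shape on the field below:

```rust
use std::collections::HashMap;

use rusoto_textract::Block;

/// Prints each LINE block's text alongside the WORD children it references.
fn print_lines(blocks: &[Block]) {
    // Ids are only unique within a single operation's results.
    let by_id: HashMap<&str, &Block> = blocks
        .iter()
        .filter_map(|b| b.id.as_deref().map(|id| (id, b)))
        .collect();

    for block in blocks.iter().filter(|b| b.block_type.as_deref() == Some("LINE")) {
        let words: Vec<&str> = block
            .relationships
            .iter()
            .flatten()
            .flat_map(|r| r.ids.iter().flatten())
            .filter_map(|id| by_id.get(id.as_str()))
            .filter_map(|child| child.text.as_deref())
            .collect();
        println!("{} => {}", block.text.as_deref().unwrap_or(""), words.join(" "));
    }
}
```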

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Block {
+    ///

The type of text that's recognized in a block. In text-detection operations, the following types are returned:

  • PAGE - Contains a list of the LINE Block objects that are detected on a document page.

  • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.

  • LINE - A string of tab-delimited, contiguous words that's detected on a document page.

In text analysis operations, the following types are returned:

  • PAGE - Contains a list of child Block objects that are detected on a document page.

  • KEY_VALUE_SET - Stores the KEY and VALUE Block objects for a field that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object.

  • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces that's detected on a document page.

  • LINE - A string of tab-delimited, contiguous words that's detected on a document page.

  • TABLE - A table that's detected on a document page. A table is any grid-based information with 2 or more rows or columns with a cell span of 1 row and 1 column each.

  • CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell.

  • SELECTION_ELEMENT - A selectable element such as a radio button or checkbox that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element.

+ #[serde(rename = "BlockType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub block_type: Option, + ///

The column in which a table cell appears. The first column position is 1. ColumnIndex isn't returned by DetectDocumentText and GetDocumentTextDetection.

+ #[serde(rename = "ColumnIndex")] + #[serde(skip_serializing_if = "Option::is_none")] + pub column_index: Option, + ///

The number of columns that a table cell spans. ColumnSpan isn't returned by DetectDocumentText and GetDocumentTextDetection.

+ #[serde(rename = "ColumnSpan")] + #[serde(skip_serializing_if = "Option::is_none")] + pub column_span: Option, + ///

The confidence that Amazon Textract has in the accuracy of the recognized text and the accuracy of the geometry points around the recognized text.

+ #[serde(rename = "Confidence")] + #[serde(skip_serializing_if = "Option::is_none")] + pub confidence: Option, + ///

The type of entity. The following can be returned:

  • KEY - An identifier for a field on the document.

  • VALUE - The field text.

EntityTypes isn't returned by DetectDocumentText and GetDocumentTextDetection.

+ #[serde(rename = "EntityTypes")] + #[serde(skip_serializing_if = "Option::is_none")] + pub entity_types: Option>, + ///

The location of the recognized text on the image. It includes an axis-aligned, coarse bounding box that surrounds the text, and a finer-grain polygon for more accurate spatial information.

+ #[serde(rename = "Geometry")] + #[serde(skip_serializing_if = "Option::is_none")] + pub geometry: Option, + ///

The identifier for the recognized text. The identifier is only unique for a single operation.

+ #[serde(rename = "Id")] + #[serde(skip_serializing_if = "Option::is_none")] + pub id: Option, + ///

The page in which a block was detected. Page is returned by asynchronous operations. Page values greater than 1 are only returned for multi-page documents that are in PDF format. A scanned image (JPG/PNG), even if it contains multiple document pages, is always considered to be a single-page document and the value of Page is always 1. Synchronous operations don't return Page as every input document is considered to be a single-page document.

+ #[serde(rename = "Page")] + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, + ///

A list of child blocks of the current block. For example a LINE object has child blocks for each WORD block that's part of the line of text. There aren't Relationship objects in the list for relationships that don't exist, such as when the current block has no child blocks. The list size can be the following:

  • 0 - The block has no child blocks.

  • 1 - The block has child blocks.

+ #[serde(rename = "Relationships")] + #[serde(skip_serializing_if = "Option::is_none")] + pub relationships: Option>, + ///

The row in which a table cell is located. The first row position is 1. RowIndex isn't returned by DetectDocumentText and GetDocumentTextDetection.

+ #[serde(rename = "RowIndex")] + #[serde(skip_serializing_if = "Option::is_none")] + pub row_index: Option, + ///

The number of rows that a table spans. RowSpan isn't returned by DetectDocumentText and GetDocumentTextDetection.

+ #[serde(rename = "RowSpan")] + #[serde(skip_serializing_if = "Option::is_none")] + pub row_span: Option, + ///

The selection status of a selectable element such as a radio button or checkbox.

+ #[serde(rename = "SelectionStatus")] + #[serde(skip_serializing_if = "Option::is_none")] + pub selection_status: Option, + ///

The word or line of text that's recognized by Amazon Textract.

+ #[serde(rename = "Text")] + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option, +} + +///

The bounding box around the recognized text, key, value, table or table cell on a document page. The left (x-coordinate) and top (y-coordinate) are coordinates that represent the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall document page size. For example, if the input image is 700 x 200 pixels, and the top-left coordinate of the bounding box is 350 x 50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall document page dimension. For example, if the document page size is 700 x 200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.
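Mapping the ratios back to pixels for a known page size, following the arithmetic above (a sketch over the `f32` ratio fields assumed here):

```rust
use rusoto_textract::BoundingBox;

/// Returns (left, top, width, height) in pixels.
fn to_pixels(bb: &BoundingBox, page_w: f32, page_h: f32) -> (f32, f32, f32, f32) {
    (
        bb.left.unwrap_or(0.0) * page_w,  // e.g. 0.5  * 700 px = 350 px
        bb.top.unwrap_or(0.0) * page_h,   // e.g. 0.25 * 200 px = 50 px
        bb.width.unwrap_or(0.0) * page_w, // e.g. 0.1  * 700 px = 70 px
        bb.height.unwrap_or(0.0) * page_h,
    )
}
```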

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct BoundingBox {
+    ///

The height of the bounding box as a ratio of the overall document page height.

+ #[serde(rename = "Height")] + #[serde(skip_serializing_if = "Option::is_none")] + pub height: Option, + ///

The left coordinate of the bounding box as a ratio of overall document page width.

+ #[serde(rename = "Left")] + #[serde(skip_serializing_if = "Option::is_none")] + pub left: Option, + ///

The top coordinate of the bounding box as a ratio of overall document page height.

+ #[serde(rename = "Top")] + #[serde(skip_serializing_if = "Option::is_none")] + pub top: Option, + ///

The width of the bounding box as a ratio of the overall document page width.

+ #[serde(rename = "Width")] + #[serde(skip_serializing_if = "Option::is_none")] + pub width: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DetectDocumentTextRequest { + ///

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DetectDocumentTextRequest {
+    /// <p>The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Textract operations, you can't pass image bytes. The document must be an image in JPG or PNG format.</p> <p>If you are using an AWS SDK to call Amazon Textract, you might not need to base64-encode image bytes passed using the Bytes field.</p>
+    #[serde(rename = "Document")]
+    pub document: Document,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DetectDocumentTextResponse {

+    /// <p>An array of Block objects containing the text detected in the document.</p>
+    #[serde(rename = "Blocks")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub blocks: Option<Vec<Block>>,
+    /// <p>Metadata about the document. Contains the number of pages that are detected in the document.</p>
+    #[serde(rename = "DocumentMetadata")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub document_metadata: Option<DocumentMetadata>,
+}
+

+/// <p>The input document, either as bytes or as an S3 object.</p> <p>You pass image bytes to an Amazon Textract API operation by using the Bytes property. For example, you would use the Bytes property to pass a document loaded from a local file system. Image bytes passed by using the Bytes property must be base64 encoded. Your code might not need to encode document file bytes if you're using an AWS SDK to call Amazon Textract API operations.</p> <p>You pass images stored in an S3 bucket to an Amazon Textract API operation by using the S3Object property. Documents stored in an S3 bucket don't need to be base64 encoded.</p> <p>The AWS Region for the S3 bucket that contains the S3 object must match the AWS Region that you use for Amazon Textract operations.</p> <p>If you use the AWS CLI to call Amazon Textract operations, passing image bytes using the Bytes property isn't supported. You must first upload the document to an Amazon S3 bucket, and then call the operation using the S3Object property.</p> <p>For Amazon Textract to process an S3 object, the user must have permission to access the S3 object.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct Document {
+    /// <p>A blob of base64-encoded document bytes. The maximum size of a document that's provided in a blob of bytes is 5 MB. The document bytes must be in PNG or JPG format.</p> <p>If you are using an AWS SDK to call Amazon Textract, you might not need to base64-encode image bytes passed using the Bytes field.</p>
+    #[serde(rename = "Bytes")]
+    #[serde(
+        deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob",
+        serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob",
+        default
+    )]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bytes: Option<bytes::Bytes>,
+    /// <p>Identifies an S3 object as the document source. The maximum size of a document stored in an S3 bucket is 5 MB.</p>
+    #[serde(rename = "S3Object")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub s3_object: Option<S3Object>,
+}
+

+/// <p>The Amazon S3 bucket that contains the document to be processed. It's used by asynchronous operations such as StartDocumentTextDetection.</p> <p>The input document can be an image file in JPG or PNG format. It can also be a file in PDF format.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct DocumentLocation {
+    /// <p>The Amazon S3 bucket that contains the input document.</p>
+    #[serde(rename = "S3Object")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub s3_object: Option<S3Object>,
+}
+
+/// <p>Information about the input document.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct DocumentMetadata {
+    /// <p>The number of pages detected in the document.</p>
+    #[serde(rename = "Pages")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pages: Option<i64>,
+}
+

+/// <p>Information about where a recognized text, key, value, table, or table cell is located on a document page.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Geometry {
+    /// <p>An axis-aligned coarse representation of the location of the recognized text on the document page.</p>
+    #[serde(rename = "BoundingBox")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bounding_box: Option<BoundingBox>,
+    /// <p>Within the bounding box, a fine-grained polygon around the recognized text.</p>
+    #[serde(rename = "Polygon")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub polygon: Option<Vec<Point>>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetDocumentAnalysisRequest {
+    /// <p>A unique identifier for the text-detection job. The JobId is returned from StartDocumentAnalysis.</p>
+    #[serde(rename = "JobId")]
+    pub job_id: String,
+    /// <p>The maximum number of results to return per paginated call. The largest value that you can specify is 1,000. If you specify a value greater than 1,000, a maximum of 1,000 results is returned. The default value is 1,000.</p>
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>If the previous response was incomplete (because there are more blocks to retrieve), Amazon Textract returns a pagination token in the response. You can use this pagination token to retrieve the next set of blocks.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetDocumentAnalysisResponse {

+    /// <p>The results of the text analysis operation.</p>
+    #[serde(rename = "Blocks")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub blocks: Option<Vec<Block>>,
+    /// <p>Information about a document that Amazon Textract processed. DocumentMetadata is returned in every page of paginated responses from an Amazon Textract operation.</p>
+    #[serde(rename = "DocumentMetadata")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub document_metadata: Option<DocumentMetadata>,
+    /// <p>The current status of the text detection job.</p>
+    #[serde(rename = "JobStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_status: Option<String>,
+    /// <p>If the response is truncated, Amazon Textract returns this token. You can use this token in the subsequent request to retrieve the next set of text detection results.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>The current status of an asynchronous document analysis operation.</p>
+    #[serde(rename = "StatusMessage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status_message: Option<String>,
+    /// <p>A list of warnings that occurred during the document analysis operation.</p>
+    #[serde(rename = "Warnings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub warnings: Option<Vec<Warning>>,
+}
+

+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct GetDocumentTextDetectionRequest {
+    /// <p>A unique identifier for the text detection job. The JobId is returned from StartDocumentTextDetection.</p>
+    #[serde(rename = "JobId")]
+    pub job_id: String,
+    /// <p>The maximum number of results to return per paginated call. The largest value you can specify is 1,000. If you specify a value greater than 1,000, a maximum of 1,000 results is returned. The default value is 1,000.</p>
+    #[serde(rename = "MaxResults")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_results: Option<i64>,
+    /// <p>If the previous response was incomplete (because there are more blocks to retrieve), Amazon Textract returns a pagination token in the response. You can use this pagination token to retrieve the next set of blocks.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct GetDocumentTextDetectionResponse {
+    /// <p>The results of the text-detection operation.</p>
+    #[serde(rename = "Blocks")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub blocks: Option<Vec<Block>>,
+    /// <p>Information about a document that Amazon Textract processed. DocumentMetadata is returned in every page of paginated responses from an Amazon Textract operation.</p>
+    #[serde(rename = "DocumentMetadata")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub document_metadata: Option<DocumentMetadata>,
+    /// <p>The current status of the text detection job.</p>
+    #[serde(rename = "JobStatus")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_status: Option<String>,
+    /// <p>If the response is truncated, Amazon Textract returns this token. You can use this token in the subsequent request to retrieve the next set of text-detection results.</p>
+    #[serde(rename = "NextToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_token: Option<String>,
+    /// <p>The current status of an asynchronous document text-detection operation.</p>
+    #[serde(rename = "StatusMessage")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status_message: Option<String>,
+    /// <p>A list of warnings that occurred during the document text-detection operation.</p>
+    #[serde(rename = "Warnings")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub warnings: Option<Vec<Warning>>,
+}
+

+/// <p>The Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Textract publishes the completion status of an asynchronous document operation, such as StartDocumentTextDetection.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct NotificationChannel {
+    /// <p>The Amazon Resource Name (ARN) of an IAM role that gives Amazon Textract publishing permissions to the Amazon SNS topic.</p>
+    #[serde(rename = "RoleArn")]
+    pub role_arn: String,
+    /// <p>The Amazon SNS topic that Amazon Textract posts the completion status to.</p>
+    #[serde(rename = "SNSTopicArn")]
+    pub sns_topic_arn: String,
+}
+
+/// <p>The X and Y coordinates of a point on a document page. The X and Y values returned are ratios of the overall document page size. For example, if the input document is 700 x 200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the document page.</p> <p>An array of Point objects, Polygon, is returned by DetectDocumentText. Polygon represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Textract Developer Guide.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Point {
+    /// <p>The value of the X coordinate for a point on a Polygon.</p>
+    #[serde(rename = "X")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub x: Option<f32>,
+    /// <p>The value of the Y coordinate for a point on a Polygon.</p>
+    #[serde(rename = "Y")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub y: Option<f32>,
+}
+

+/// <p>Information about how blocks are related to each other. A Block object contains 0 or more Relation objects in a list, Relationships. For more information, see Block.</p> <p>The Type element provides the type of the relationship for all blocks in the IDs array.</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Relationship {
+    /// <p>An array of IDs for related blocks. You can get the type of the relationship from the Type element.</p>
+    #[serde(rename = "Ids")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ids: Option<Vec<String>>,
+    /// <p>The type of relationship that the blocks in the IDs array have with the current block. The relationship can be VALUE or CHILD.</p>
+    #[serde(rename = "Type")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub type_: Option<String>,
+}
+

+/// <p>The S3 bucket name and file name that identifies the document.</p> <p>The AWS Region for the S3 bucket that contains the document must match the Region that you use for Amazon Textract operations.</p> <p>For Amazon Textract to process a file in an S3 bucket, the user must have permission to access the S3 bucket and file.</p>
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct S3Object {
+    /// <p>The name of the S3 bucket.</p>
+    #[serde(rename = "Bucket")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bucket: Option<String>,
+    /// <p>The file name of the input document. It must be an image file (.JPG or .PNG format). Asynchronous operations also support PDF files.</p>
+    #[serde(rename = "Name")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// <p>If the bucket has versioning enabled, you can specify the object version.</p>
+    #[serde(rename = "Version")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct StartDocumentAnalysisRequest {

+    /// <p>The idempotent token that you use to identify the start request. If you use the same token with multiple StartDocumentAnalysis requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidentally started more than once.</p>
+    #[serde(rename = "ClientRequestToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub client_request_token: Option<String>,
+    /// <p>The location of the document to be processed.</p>
+    #[serde(rename = "DocumentLocation")]
+    pub document_location: DocumentLocation,
+    /// <p>A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected fields and the associated text. To perform both types of analysis, add TABLES and FORMS to FeatureTypes. All selectable elements (SELECTION_ELEMENT) that are detected are returned, whatever the value of FeatureTypes.</p>
+    #[serde(rename = "FeatureTypes")]
+    pub feature_types: Vec<String>,
+    /// <p>An identifier you specify that's included in the completion notification that's published to the Amazon SNS topic. For example, you can use JobTag to identify the type of document, such as a tax form or a receipt, that the completion notification corresponds to.</p>
+    #[serde(rename = "JobTag")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_tag: Option<String>,
+    /// <p>The Amazon SNS topic ARN that you want Amazon Textract to publish the completion status of the operation to.</p>
+    #[serde(rename = "NotificationChannel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub notification_channel: Option<NotificationChannel>,
+}
+

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StartDocumentAnalysisResponse {
+    /// <p>The identifier for the document text detection job. Use JobId to identify the job in a subsequent call to GetDocumentAnalysis.</p>
+    #[serde(rename = "JobId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_id: Option<String>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct StartDocumentTextDetectionRequest {
+    /// <p>The idempotent token that's used to identify the start request. If you use the same token with multiple StartDocumentTextDetection requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidentally started more than once.</p>
+    #[serde(rename = "ClientRequestToken")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub client_request_token: Option<String>,
+    /// <p>The location of the document to be processed.</p>
+    #[serde(rename = "DocumentLocation")]
+    pub document_location: DocumentLocation,
+    /// <p>An identifier you specify that's included in the completion notification that's published to the Amazon SNS topic. For example, you can use JobTag to identify the type of document, such as a tax form or a receipt, that the completion notification corresponds to.</p>
+    #[serde(rename = "JobTag")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_tag: Option<String>,
+    /// <p>The Amazon SNS topic ARN that you want Amazon Textract to publish the completion status of the operation to.</p>
+    #[serde(rename = "NotificationChannel")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub notification_channel: Option<NotificationChannel>,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct StartDocumentTextDetectionResponse {
+    /// <p>The identifier for the document text-detection job. Use JobId to identify the job in a subsequent call to GetDocumentTextDetection.</p>
+    #[serde(rename = "JobId")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub job_id: Option<String>,
+}
+
+/// <p>A warning about an issue that occurred during asynchronous text analysis (StartDocumentAnalysis) or asynchronous document-text detection (StartDocumentTextDetection).</p>
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct Warning {
+    /// <p>The error code for the warning.</p>
+    #[serde(rename = "ErrorCode")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error_code: Option<String>,
+    /// <p>A list of the pages that the warning applies to.</p>
+    #[serde(rename = "Pages")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pages: Option<Vec<i64>>,
+}
+
+/// Errors returned by AnalyzeDocument
+#[derive(Debug, PartialEq)]
+pub enum AnalyzeDocumentError {

+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract isn't able to read the document.</p>
+    BadDocument(String),
+    /// <p>The document can't be processed because it's too large. The maximum document size for synchronous operations is 5 MB. The maximum document size for asynchronous operations is 500 MB for PDF format files.</p>
+    DocumentTooLarge(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>Amazon Textract is unable to access the S3 object that's specified in the request.</p>
+    InvalidS3Object(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+    /// <p>The format of the input document isn't supported. Amazon Textract supports documents that are .png or .jpg format.</p>
+    UnsupportedDocument(String),
+}
+
+impl AnalyzeDocumentError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<AnalyzeDocumentError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::AccessDenied(err.msg))
+                }
+                "BadDocumentException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::BadDocument(err.msg))
+                }
+                "DocumentTooLargeException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::DocumentTooLarge(err.msg))
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(AnalyzeDocumentError::InternalServerError(err.msg))
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::InvalidParameter(err.msg))
+                }
+                "InvalidS3ObjectException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::InvalidS3Object(err.msg))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        AnalyzeDocumentError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::Throttling(err.msg))
+                }
+                "UnsupportedDocumentException" => {
+                    return RusotoError::Service(AnalyzeDocumentError::UnsupportedDocument(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for AnalyzeDocumentError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for AnalyzeDocumentError {
+    fn description(&self) -> &str {
+        match *self {
+            AnalyzeDocumentError::AccessDenied(ref cause) => cause,
+            AnalyzeDocumentError::BadDocument(ref cause) => cause,
+            AnalyzeDocumentError::DocumentTooLarge(ref cause) => cause,
+            AnalyzeDocumentError::InternalServerError(ref cause) => cause,
+            AnalyzeDocumentError::InvalidParameter(ref cause) => cause,
+            AnalyzeDocumentError::InvalidS3Object(ref cause) => cause,
+            AnalyzeDocumentError::ProvisionedThroughputExceeded(ref cause) => cause,
+            AnalyzeDocumentError::Throttling(ref cause) => cause,
+            AnalyzeDocumentError::UnsupportedDocument(ref cause) => cause,
+        }
+    }
+}
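`from_response` maps service exceptions onto these variants (and ValidationException onto `RusotoError::Validation`), so callers match on the wrapped error. A sketch of what that looks like from the caller's side:

```rust
use rusoto_core::RusotoError;
use rusoto_textract::AnalyzeDocumentError;

// Sketch: how the variants produced by from_response surface to a caller.
fn explain(err: RusotoError<AnalyzeDocumentError>) -> String {
    match err {
        // Service errors carry the message parsed from the JSON body.
        RusotoError::Service(AnalyzeDocumentError::UnsupportedDocument(msg)) => {
            format!("re-encode the input as PNG or JPG: {}", msg)
        }
        RusotoError::Service(AnalyzeDocumentError::Throttling(msg))
        | RusotoError::Service(AnalyzeDocumentError::ProvisionedThroughputExceeded(msg)) => {
            format!("back off and retry: {}", msg)
        }
        RusotoError::Service(other) => format!("service error: {:?}", other),
        // ValidationException is mapped to RusotoError::Validation above.
        RusotoError::Validation(msg) => format!("invalid request: {}", msg),
        other => format!("transport or credential failure: {:?}", other),
    }
}
```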

+/// Errors returned by DetectDocumentText
+#[derive(Debug, PartialEq)]
+pub enum DetectDocumentTextError {
+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract isn't able to read the document.</p>
+    BadDocument(String),
+    /// <p>The document can't be processed because it's too large. The maximum document size for synchronous operations is 5 MB. The maximum document size for asynchronous operations is 500 MB for PDF format files.</p>
+    DocumentTooLarge(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>Amazon Textract is unable to access the S3 object that's specified in the request.</p>
+    InvalidS3Object(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+    /// <p>The format of the input document isn't supported. Amazon Textract supports documents that are .png or .jpg format.</p>
+    UnsupportedDocument(String),
+}
+
+impl DetectDocumentTextError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DetectDocumentTextError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(DetectDocumentTextError::AccessDenied(err.msg))
+                }
+                "BadDocumentException" => {
+                    return RusotoError::Service(DetectDocumentTextError::BadDocument(err.msg))
+                }
+                "DocumentTooLargeException" => {
+                    return RusotoError::Service(DetectDocumentTextError::DocumentTooLarge(err.msg))
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(DetectDocumentTextError::InternalServerError(
+                        err.msg,
+                    ))
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(DetectDocumentTextError::InvalidParameter(err.msg))
+                }
+                "InvalidS3ObjectException" => {
+                    return RusotoError::Service(DetectDocumentTextError::InvalidS3Object(err.msg))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        DetectDocumentTextError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(DetectDocumentTextError::Throttling(err.msg))
+                }
+                "UnsupportedDocumentException" => {
+                    return RusotoError::Service(DetectDocumentTextError::UnsupportedDocument(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DetectDocumentTextError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DetectDocumentTextError {
+    fn description(&self) -> &str {
+        match *self {
+            DetectDocumentTextError::AccessDenied(ref cause) => cause,
+            DetectDocumentTextError::BadDocument(ref cause) => cause,
+            DetectDocumentTextError::DocumentTooLarge(ref cause) => cause,
+            DetectDocumentTextError::InternalServerError(ref cause) => cause,
+            DetectDocumentTextError::InvalidParameter(ref cause) => cause,
+            DetectDocumentTextError::InvalidS3Object(ref cause) => cause,
+            DetectDocumentTextError::ProvisionedThroughputExceeded(ref cause) => cause,
+            DetectDocumentTextError::Throttling(ref cause) => cause,
+            DetectDocumentTextError::UnsupportedDocument(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by GetDocumentAnalysis
+#[derive(Debug, PartialEq)]
+pub enum GetDocumentAnalysisError {
+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An invalid job identifier was passed to GetDocumentAnalysis or to GetDocumentTextDetection.</p>
+    InvalidJobId(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+}
+
+impl GetDocumentAnalysisError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetDocumentAnalysisError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(GetDocumentAnalysisError::AccessDenied(err.msg))
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(GetDocumentAnalysisError::InternalServerError(
+                        err.msg,
+                    ))
+                }
+                "InvalidJobIdException" => {
+                    return RusotoError::Service(GetDocumentAnalysisError::InvalidJobId(err.msg))
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(GetDocumentAnalysisError::InvalidParameter(
+                        err.msg,
+                    ))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        GetDocumentAnalysisError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(GetDocumentAnalysisError::Throttling(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetDocumentAnalysisError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetDocumentAnalysisError {
+    fn description(&self) -> &str {
+        match *self {
+            GetDocumentAnalysisError::AccessDenied(ref cause) => cause,
+            GetDocumentAnalysisError::InternalServerError(ref cause) => cause,
+            GetDocumentAnalysisError::InvalidJobId(ref cause) => cause,
+            GetDocumentAnalysisError::InvalidParameter(ref cause) => cause,
+            GetDocumentAnalysisError::ProvisionedThroughputExceeded(ref cause) => cause,
+            GetDocumentAnalysisError::Throttling(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by GetDocumentTextDetection
+#[derive(Debug, PartialEq)]
+pub enum GetDocumentTextDetectionError {
+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An invalid job identifier was passed to GetDocumentAnalysis or to GetDocumentTextDetection.</p>
+    InvalidJobId(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+}
+
+impl GetDocumentTextDetectionError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetDocumentTextDetectionError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(GetDocumentTextDetectionError::AccessDenied(
+                        err.msg,
+                    ))
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(
+                        GetDocumentTextDetectionError::InternalServerError(err.msg),
+                    )
+                }
+                "InvalidJobIdException" => {
+                    return RusotoError::Service(GetDocumentTextDetectionError::InvalidJobId(
+                        err.msg,
+                    ))
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(GetDocumentTextDetectionError::InvalidParameter(
+                        err.msg,
+                    ))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        GetDocumentTextDetectionError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(GetDocumentTextDetectionError::Throttling(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for GetDocumentTextDetectionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for GetDocumentTextDetectionError {
+    fn description(&self) -> &str {
+        match *self {
+            GetDocumentTextDetectionError::AccessDenied(ref cause) => cause,
+            GetDocumentTextDetectionError::InternalServerError(ref cause) => cause,
+            GetDocumentTextDetectionError::InvalidJobId(ref cause) => cause,
+            GetDocumentTextDetectionError::InvalidParameter(ref cause) => cause,
+            GetDocumentTextDetectionError::ProvisionedThroughputExceeded(ref cause) => cause,
+            GetDocumentTextDetectionError::Throttling(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by StartDocumentAnalysis
+#[derive(Debug, PartialEq)]
+pub enum StartDocumentAnalysisError {
+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract isn't able to read the document.</p>
+    BadDocument(String),
+    /// <p>The document can't be processed because it's too large. The maximum document size for synchronous operations is 5 MB. The maximum document size for asynchronous operations is 500 MB for PDF format files.</p>
+    DocumentTooLarge(String),
+    /// <p>A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.</p>
+    IdempotentParameterMismatch(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>Amazon Textract is unable to access the S3 object that's specified in the request.</p>
+    InvalidS3Object(String),
+    /// <p>An Amazon Textract service limit was exceeded. For example, if you start too many asynchronous jobs concurrently, calls to start operations (StartDocumentTextDetection, for example) raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Textract service limit.</p>
+    LimitExceeded(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+    /// <p>The format of the input document isn't supported. Amazon Textract supports documents that are .png or .jpg format.</p>
+    UnsupportedDocument(String),
+}
+
+impl StartDocumentAnalysisError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<StartDocumentAnalysisError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::AccessDenied(err.msg))
+                }
+                "BadDocumentException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::BadDocument(err.msg))
+                }
+                "DocumentTooLargeException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::DocumentTooLarge(
+                        err.msg,
+                    ))
+                }
+                "IdempotentParameterMismatchException" => {
+                    return RusotoError::Service(
+                        StartDocumentAnalysisError::IdempotentParameterMismatch(err.msg),
+                    )
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::InternalServerError(
+                        err.msg,
+                    ))
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::InvalidParameter(
+                        err.msg,
+                    ))
+                }
+                "InvalidS3ObjectException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::InvalidS3Object(
+                        err.msg,
+                    ))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::LimitExceeded(err.msg))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        StartDocumentAnalysisError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::Throttling(err.msg))
+                }
+                "UnsupportedDocumentException" => {
+                    return RusotoError::Service(StartDocumentAnalysisError::UnsupportedDocument(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for StartDocumentAnalysisError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for StartDocumentAnalysisError {
+    fn description(&self) -> &str {
+        match *self {
+            StartDocumentAnalysisError::AccessDenied(ref cause) => cause,
+            StartDocumentAnalysisError::BadDocument(ref cause) => cause,
+            StartDocumentAnalysisError::DocumentTooLarge(ref cause) => cause,
+            StartDocumentAnalysisError::IdempotentParameterMismatch(ref cause) => cause,
+            StartDocumentAnalysisError::InternalServerError(ref cause) => cause,
+            StartDocumentAnalysisError::InvalidParameter(ref cause) => cause,
+            StartDocumentAnalysisError::InvalidS3Object(ref cause) => cause,
+            StartDocumentAnalysisError::LimitExceeded(ref cause) => cause,
+            StartDocumentAnalysisError::ProvisionedThroughputExceeded(ref cause) => cause,
+            StartDocumentAnalysisError::Throttling(ref cause) => cause,
+            StartDocumentAnalysisError::UnsupportedDocument(ref cause) => cause,
+        }
+    }
+}

+/// Errors returned by StartDocumentTextDetection
+#[derive(Debug, PartialEq)]
+pub enum StartDocumentTextDetectionError {
+    /// <p>You aren't authorized to perform the action.</p>
+    AccessDenied(String),
+    /// <p>Amazon Textract isn't able to read the document.</p>
+    BadDocument(String),
+    /// <p>The document can't be processed because it's too large. The maximum document size for synchronous operations is 5 MB. The maximum document size for asynchronous operations is 500 MB for PDF format files.</p>
+    DocumentTooLarge(String),
+    /// <p>A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.</p>
+    IdempotentParameterMismatch(String),
+    /// <p>Amazon Textract experienced a service issue. Try your call again.</p>
+    InternalServerError(String),
+    /// <p>An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values is supplied in the Document request parameter. Validate your parameter before calling the API operation again.</p>
+    InvalidParameter(String),
+    /// <p>Amazon Textract is unable to access the S3 object that's specified in the request.</p>
+    InvalidS3Object(String),
+    /// <p>An Amazon Textract service limit was exceeded. For example, if you start too many asynchronous jobs concurrently, calls to start operations (StartDocumentTextDetection, for example) raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Textract service limit.</p>
+    LimitExceeded(String),
+    /// <p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.</p>
+    ProvisionedThroughputExceeded(String),
+    /// <p>Amazon Textract is temporarily unable to process the request. Try your call again.</p>
+    Throttling(String),
+    /// <p>The format of the input document isn't supported. Amazon Textract supports documents that are .png or .jpg format.</p>
+    UnsupportedDocument(String),
+}
+
+impl StartDocumentTextDetectionError {
+    pub fn from_response(
+        res: BufferedHttpResponse,
+    ) -> RusotoError<StartDocumentTextDetectionError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "AccessDeniedException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::AccessDenied(
+                        err.msg,
+                    ))
+                }
+                "BadDocumentException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::BadDocument(
+                        err.msg,
+                    ))
+                }
+                "DocumentTooLargeException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::DocumentTooLarge(
+                        err.msg,
+                    ))
+                }
+                "IdempotentParameterMismatchException" => {
+                    return RusotoError::Service(
+                        StartDocumentTextDetectionError::IdempotentParameterMismatch(err.msg),
+                    )
+                }
+                "InternalServerError" => {
+                    return RusotoError::Service(
+                        StartDocumentTextDetectionError::InternalServerError(err.msg),
+                    )
+                }
+                "InvalidParameterException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::InvalidParameter(
+                        err.msg,
+                    ))
+                }
+                "InvalidS3ObjectException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::InvalidS3Object(
+                        err.msg,
+                    ))
+                }
+                "LimitExceededException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::LimitExceeded(
+                        err.msg,
+                    ))
+                }
+                "ProvisionedThroughputExceededException" => {
+                    return RusotoError::Service(
+                        StartDocumentTextDetectionError::ProvisionedThroughputExceeded(err.msg),
+                    )
+                }
+                "ThrottlingException" => {
+                    return RusotoError::Service(StartDocumentTextDetectionError::Throttling(
+                        err.msg,
+                    ))
+                }
+                "UnsupportedDocumentException" => {
+                    return RusotoError::Service(
+                        StartDocumentTextDetectionError::UnsupportedDocument(err.msg),
+                    )
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for StartDocumentTextDetectionError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for StartDocumentTextDetectionError {
+    fn description(&self) -> &str {
+        match *self {
+            StartDocumentTextDetectionError::AccessDenied(ref cause) => cause,
+            StartDocumentTextDetectionError::BadDocument(ref cause) => cause,
+            StartDocumentTextDetectionError::DocumentTooLarge(ref cause) => cause,
+            StartDocumentTextDetectionError::IdempotentParameterMismatch(ref cause) => cause,
+            StartDocumentTextDetectionError::InternalServerError(ref cause) => cause,
+            StartDocumentTextDetectionError::InvalidParameter(ref cause) => cause,
+            StartDocumentTextDetectionError::InvalidS3Object(ref cause) => cause,
+            StartDocumentTextDetectionError::LimitExceeded(ref cause) => cause,
+            StartDocumentTextDetectionError::ProvisionedThroughputExceeded(ref cause) => cause,
+            StartDocumentTextDetectionError::Throttling(ref cause) => cause,
+            StartDocumentTextDetectionError::UnsupportedDocument(ref cause) => cause,
+        }
+    }
+}
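Several of the variant docs above end with "Try your call again", which suggests wrapping the start operations in a retry for the throttling-related variants. A sketch with an arbitrary attempt count and fixed backoff; pairing it with a stable ClientRequestToken keeps the retries idempotent.

```rust
use rusoto_core::RusotoError;
use rusoto_textract::{
    StartDocumentTextDetectionError, StartDocumentTextDetectionRequest, Textract,
};
use std::{thread, time::Duration};

// Sketch: retry only the variants the docs describe as transient.
fn start_with_retry<T: Textract>(
    client: &T,
    request: StartDocumentTextDetectionRequest,
) -> Result<Option<String>, RusotoError<StartDocumentTextDetectionError>> {
    let mut attempts = 0;
    loop {
        match client.start_document_text_detection(request.clone()).sync() {
            Ok(resp) => return Ok(resp.job_id),
            Err(RusotoError::Service(StartDocumentTextDetectionError::Throttling(_)))
            | Err(RusotoError::Service(
                StartDocumentTextDetectionError::ProvisionedThroughputExceeded(_),
            )) if attempts < 3 => {
                // Arbitrary choices: three attempts, one-second pause.
                attempts += 1;
                thread::sleep(Duration::from_secs(1));
            }
            Err(err) => return Err(err),
        }
    }
}
```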

+/// Trait representing the capabilities of the Amazon Textract API. Amazon Textract clients implement this trait.
+pub trait Textract {
+    /// <p>Analyzes an input document for relationships between detected items.</p> <p>The types of information returned are as follows:</p> <ul> <li> <p>Words and lines that are related to nearby lines and words. The related information is returned in two Block objects each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.</p> </li> <li> <p>Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.</p> </li> <li> <p>Selectable elements such as checkboxes and radio buttons. A SELECTION_ELEMENT Block object contains information about a selectable element.</p> </li> <li> <p>Lines and words of text. A LINE Block object contains one or more WORD Block objects.</p> </li> </ul> <p>You can choose which type of analysis to perform by specifying the FeatureTypes list.</p> <p>The output is returned in a list of BLOCK objects.</p> <p>AnalyzeDocument is a synchronous operation. To analyze documents asynchronously, use StartDocumentAnalysis.</p> <p>For more information, see Document Text Analysis.</p>
+    fn analyze_document(
+        &self,
+        input: AnalyzeDocumentRequest,
+    ) -> RusotoFuture<AnalyzeDocumentResponse, AnalyzeDocumentError>;
+

+    /// <p>Detects text in the input document. Amazon Textract can detect lines of text and the words that make up a line of text. The input document must be an image in JPG or PNG format. DetectDocumentText returns the detected text in an array of Block objects.</p> <p>Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.</p> <p>DetectDocumentText is a synchronous operation. To analyze documents asynchronously, use StartDocumentTextDetection.</p> <p>For more information, see Document Text Detection.</p>
+    fn detect_document_text(
+        &self,
+        input: DetectDocumentTextRequest,
+    ) -> RusotoFuture<DetectDocumentTextResponse, DetectDocumentTextError>;
+

+    /// <p>Gets the results for an Amazon Textract asynchronous operation that analyzes text in a document.</p> <p>You start asynchronous text analysis by calling StartDocumentAnalysis, which returns a job identifier (JobId). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentAnalysis. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis.</p> <p>GetDocumentAnalysis returns an array of Block objects. The following types of information are returned:</p> <ul> <li> <p>Words and lines that are related to nearby lines and words. The related information is returned in two Block objects each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.</p> </li> <li> <p>Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.</p> </li> <li> <p>Selectable elements such as checkboxes and radio buttons. A SELECTION_ELEMENT Block object contains information about a selectable element.</p> </li> <li> <p>Lines and words of text. A LINE Block object contains one or more WORD Block objects.</p> </li> </ul> <p>Use the MaxResults parameter to limit the number of blocks returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentAnalysis, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentAnalysis.</p> <p>For more information, see Document Text Analysis.</p>
+    fn get_document_analysis(
+        &self,
+        input: GetDocumentAnalysisRequest,
+    ) -> RusotoFuture<GetDocumentAnalysisResponse, GetDocumentAnalysisError>;
+

+    /// <p>Gets the results for an Amazon Textract asynchronous operation that detects text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.</p> <p>You start asynchronous text detection by calling StartDocumentTextDetection, which returns a job identifier (JobId). When the text detection operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentTextDetection. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.</p> <p>GetDocumentTextDetection returns an array of Block objects.</p> <p>Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.</p> <p>Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentTextDetection, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentTextDetection.</p> <p>For more information, see Document Text Detection.</p>
+    fn get_document_text_detection(
+        &self,
+        input: GetDocumentTextDetectionRequest,
+    ) -> RusotoFuture<GetDocumentTextDetectionResponse, GetDocumentTextDetectionError>;
+

+    /// <p>Starts asynchronous analysis of an input document for relationships between detected items such as key and value pairs, tables, and selection elements.</p> <p>StartDocumentAnalysis can analyze text in documents that are in JPG, PNG, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.</p> <p>StartDocumentAnalysis returns a job identifier (JobId) that you use to get the results of the operation. When text analysis is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis.</p> <p>For more information, see Document Text Analysis.</p>
+    fn start_document_analysis(
+        &self,
+        input: StartDocumentAnalysisRequest,
+    ) -> RusotoFuture<StartDocumentAnalysisResponse, StartDocumentAnalysisError>;
+

+    /// <p>Starts the asynchronous detection of text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.</p> <p>StartDocumentTextDetection can analyze text in documents that are in JPG, PNG, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.</p> <p>StartDocumentTextDetection returns a job identifier (JobId) that you use to get the results of the operation. When text detection is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.</p> <p>For more information, see Document Text Detection.</p>
+    fn start_document_text_detection(
+        &self,
+        input: StartDocumentTextDetectionRequest,
+    ) -> RusotoFuture<StartDocumentTextDetectionResponse, StartDocumentTextDetectionError>;
+}
+/// A client for the Amazon Textract API.
+#[derive(Clone)]
+pub struct TextractClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl TextractClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> TextractClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> TextractClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
+            region,
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> TextractClient {
+        TextractClient { client, region }
+    }
+}
+
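Putting the client together with the asynchronous operations, a sketch that starts a text-detection job and polls for completion instead of subscribing to the SNS topic. The region, bucket, and key are placeholders, and the five-second poll interval is arbitrary.

```rust
use rusoto_core::Region;
use rusoto_textract::{
    DocumentLocation, GetDocumentTextDetectionRequest, S3Object,
    StartDocumentTextDetectionRequest, Textract, TextractClient,
};
use std::{thread, time::Duration};

fn main() {
    let client = TextractClient::new(Region::UsEast1);
    // Start the job; only DocumentLocation is required.
    let start = client
        .start_document_text_detection(StartDocumentTextDetectionRequest {
            document_location: DocumentLocation {
                s3_object: Some(S3Object {
                    bucket: Some("my-bucket".to_owned()), // placeholder
                    name: Some("scan.pdf".to_owned()),    // placeholder
                    version: None,
                }),
            },
            ..Default::default()
        })
        .sync()
        .expect("failed to start job");
    let job_id = start.job_id.expect("no JobId returned");

    // Poll until the job leaves IN_PROGRESS.
    loop {
        let resp = client
            .get_document_text_detection(GetDocumentTextDetectionRequest {
                job_id: job_id.clone(),
                ..Default::default()
            })
            .sync()
            .expect("polling failed");
        match resp.job_status.as_ref().map(String::as_str) {
            Some("IN_PROGRESS") | None => thread::sleep(Duration::from_secs(5)),
            status => {
                // SUCCEEDED, FAILED, or PARTIAL_SUCCESS: stop polling.
                println!(
                    "job finished: {:?}, {} blocks",
                    status,
                    resp.blocks.map(|b| b.len()).unwrap_or(0)
                );
                break;
            }
        }
    }
}
```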

+impl Textract for TextractClient {
+    /// <p>Analyzes an input document for relationships between detected items.</p> <p>The types of information returned are as follows:</p> <ul> <li> <p>Words and lines that are related to nearby lines and words. The related information is returned in two Block objects each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.</p> </li> <li> <p>Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.</p> </li> <li> <p>Selectable elements such as checkboxes and radio buttons. A SELECTION_ELEMENT Block object contains information about a selectable element.</p> </li> <li> <p>Lines and words of text. A LINE Block object contains one or more WORD Block objects.</p> </li> </ul> <p>You can choose which type of analysis to perform by specifying the FeatureTypes list.</p> <p>The output is returned in a list of BLOCK objects.</p> <p>AnalyzeDocument is a synchronous operation. To analyze documents asynchronously, use StartDocumentAnalysis.</p> <p>For more information, see Document Text Analysis.</p>
+    fn analyze_document(
+        &self,
+        input: AnalyzeDocumentRequest,
+    ) -> RusotoFuture<AnalyzeDocumentResponse, AnalyzeDocumentError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.AnalyzeDocument");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<AnalyzeDocumentResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(AnalyzeDocumentError::from_response(response))),
+                )
+            }
+        })
+    }
+

+    /// <p>Detects text in the input document. Amazon Textract can detect lines of text and the words that make up a line of text. The input document must be an image in JPG or PNG format. DetectDocumentText returns the detected text in an array of Block objects.</p> <p>Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.</p> <p>DetectDocumentText is a synchronous operation. To analyze documents asynchronously, use StartDocumentTextDetection.</p> <p>For more information, see Document Text Detection.</p>
+    fn detect_document_text(
+        &self,
+        input: DetectDocumentTextRequest,
+    ) -> RusotoFuture<DetectDocumentTextResponse, DetectDocumentTextError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.DetectDocumentText");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DetectDocumentTextResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DetectDocumentTextError::from_response(response))),
+                )
+            }
+        })
+    }
+

+    /// <p>Gets the results for an Amazon Textract asynchronous operation that analyzes text in a document.</p> <p>You start asynchronous text analysis by calling StartDocumentAnalysis, which returns a job identifier (JobId). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentAnalysis. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis.</p> <p>GetDocumentAnalysis returns an array of Block objects. The following types of information are returned:</p> <ul> <li> <p>Words and lines that are related to nearby lines and words. The related information is returned in two Block objects each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.</p> </li> <li> <p>Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.</p> </li> <li> <p>Selectable elements such as checkboxes and radio buttons. A SELECTION_ELEMENT Block object contains information about a selectable element.</p> </li> <li> <p>Lines and words of text. A LINE Block object contains one or more WORD Block objects.</p> </li> </ul> <p>Use the MaxResults parameter to limit the number of blocks returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentAnalysis, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentAnalysis.</p> <p>For more information, see Document Text Analysis.</p>
+    fn get_document_analysis(
+        &self,
+        input: GetDocumentAnalysisRequest,
+    ) -> RusotoFuture<GetDocumentAnalysisResponse, GetDocumentAnalysisError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.GetDocumentAnalysis");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetDocumentAnalysisResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(GetDocumentAnalysisError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+

Gets the results for an Amazon Textract asynchronous operation that detects text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.

You start asynchronous text detection by calling StartDocumentTextDetection, which returns a job identifier (JobId). When the text detection operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentTextDetection. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.

GetDocumentTextDetection returns an array of Block objects.

Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.

Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentTextDetection, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentTextDetection.

For more information, see Document Text Detection.

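A minimal sketch of retrieving one page of text-detection results, assuming the JobId below (hypothetical) came from an earlier StartDocumentTextDetection call; checking JobStatus before consuming blocks mirrors the guidance above.

```rust
use rusoto_core::Region;
use rusoto_textract::{GetDocumentTextDetectionRequest, Textract, TextractClient};

fn main() {
    let client = TextractClient::new(Region::UsEast1);
    let response = client
        .get_document_text_detection(GetDocumentTextDetectionRequest {
            job_id: "0123456789abcdef".to_owned(), // hypothetical JobId
            max_results: Some(1000),
            next_token: None,
        })
        .sync()
        .expect("GetDocumentTextDetection failed");

    // Only consume blocks once the job has finished successfully.
    if response.job_status.as_ref().map(String::as_str) == Some("SUCCEEDED") {
        let lines = response
            .blocks
            .unwrap_or_default()
            .iter()
            .filter(|b| b.block_type.as_ref().map(String::as_str) == Some("LINE"))
            .count();
        println!("detected {} lines on this page of results", lines);
    }
}
```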
+    fn get_document_text_detection(
+        &self,
+        input: GetDocumentTextDetectionRequest,
+    ) -> RusotoFuture<GetDocumentTextDetectionResponse, GetDocumentTextDetectionError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.GetDocumentTextDetection");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<GetDocumentTextDetectionResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(GetDocumentTextDetectionError::from_response(response))
+                }))
+            }
+        })
+    }
+
+    ///

Starts asynchronous analysis of an input document for relationships between detected items such as key and value pairs, tables, and selection elements.

StartDocumentAnalysis can analyze text in documents that are in JPG, PNG, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.

StartDocumentAnalysis returns a job identifier (JobId) that you use to get the results of the operation. When text analysis is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis.

For more information, see Document Text Analysis.

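A minimal sketch of starting an analysis job against an S3 object, as described above; the bucket and key are hypothetical placeholders, and the FeatureTypes values request the tables and form (key-value) relationships.

```rust
use rusoto_core::Region;
use rusoto_textract::{
    DocumentLocation, S3Object, StartDocumentAnalysisRequest, Textract, TextractClient,
};

fn main() {
    let client = TextractClient::new(Region::UsEast1);
    let response = client
        .start_document_analysis(StartDocumentAnalysisRequest {
            document_location: DocumentLocation {
                // Hypothetical bucket and object key.
                s3_object: Some(S3Object {
                    bucket: Some("my-input-bucket".to_owned()),
                    name: Some("statement.pdf".to_owned()),
                    ..Default::default()
                }),
            },
            feature_types: vec!["TABLES".to_owned(), "FORMS".to_owned()],
            ..Default::default()
        })
        .sync()
        .expect("StartDocumentAnalysis failed");
    println!("JobId: {:?}", response.job_id);
}
```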
+    fn start_document_analysis(
+        &self,
+        input: StartDocumentAnalysisRequest,
+    ) -> RusotoFuture<StartDocumentAnalysisResponse, StartDocumentAnalysisError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.StartDocumentAnalysis");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<StartDocumentAnalysisResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(StartDocumentAnalysisError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }
+
+    ///

Starts the asynchronous detection of text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.

StartDocumentTextDetection can analyze text in documents that are in JPG, PNG, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.

StartDocumentTextDetection returns a job identifier (JobId) that you use to get the results of the operation. When text detection is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.

For more information, see Document Text Detection.

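A minimal sketch of starting a text-detection job with the SNS notification wiring described above; the bucket, key, and both ARNs are hypothetical placeholders.

```rust
use rusoto_core::Region;
use rusoto_textract::{
    DocumentLocation, NotificationChannel, S3Object, StartDocumentTextDetectionRequest, Textract,
    TextractClient,
};

fn main() {
    let client = TextractClient::new(Region::UsEast1);
    let response = client
        .start_document_text_detection(StartDocumentTextDetectionRequest {
            document_location: DocumentLocation {
                s3_object: Some(S3Object {
                    bucket: Some("my-input-bucket".to_owned()), // hypothetical
                    name: Some("scan.png".to_owned()),          // hypothetical
                    ..Default::default()
                }),
            },
            // The completion status is published to this SNS topic; both ARNs are hypothetical.
            notification_channel: Some(NotificationChannel {
                role_arn: "arn:aws:iam::123456789012:role/TextractPublish".to_owned(),
                sns_topic_arn: "arn:aws:sns:us-east-1:123456789012:textract-jobs".to_owned(),
            }),
            ..Default::default()
        })
        .sync()
        .expect("StartDocumentTextDetection failed");
    println!("JobId: {:?}", response.job_id);
}
```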
+    fn start_document_text_detection(
+        &self,
+        input: StartDocumentTextDetectionRequest,
+    ) -> RusotoFuture<StartDocumentTextDetectionResponse, StartDocumentTextDetectionError> {
+        let mut request = SignedRequest::new("POST", "textract", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "Textract.StartDocumentTextDetection");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<StartDocumentTextDetectionResponse, _>()
+                }))
+            } else {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    Err(StartDocumentTextDetectionError::from_response(response))
+                }))
+            }
+        })
+    }
+}
diff --git a/rusoto/services/textract/src/lib.rs b/rusoto/services/textract/src/lib.rs
new file mode 100644
index 00000000000..9554ec9072c
--- /dev/null
+++ b/rusoto/services/textract/src/lib.rs
@@ -0,0 +1,32 @@
+
+// =================================================================
+//
+//                            * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//!

Amazon Textract detects and analyzes text in documents and converts it into machine-readable text. This is the API reference documentation for Amazon Textract.

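A minimal end-to-end sketch of the TextractClient that the crate docs point to, calling the synchronous DetectDocumentText operation; bucket and key names are hypothetical, and `.sync()` simply blocks on the returned future.

```rust
use rusoto_core::Region;
use rusoto_textract::{DetectDocumentTextRequest, Document, S3Object, Textract, TextractClient};

fn main() {
    let client = TextractClient::new(Region::UsEast1);
    let response = client
        .detect_document_text(DetectDocumentTextRequest {
            document: Document {
                // Hypothetical bucket and object key.
                s3_object: Some(S3Object {
                    bucket: Some("my-input-bucket".to_owned()),
                    name: Some("receipt.jpg".to_owned()),
                    ..Default::default()
                }),
                ..Default::default()
            },
        })
        .sync()
        .expect("DetectDocumentText failed");

    // Print each detected line of text.
    for block in response.blocks.unwrap_or_default() {
        if block.block_type.as_ref().map(String::as_str) == Some("LINE") {
            println!("{}", block.text.unwrap_or_default());
        }
    }
}
```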
+//!
+//! If you're using the service, you're probably looking for [TextractClient](struct.TextractClient.html) and [Textract](trait.Textract.html).
+
+extern crate bytes;
+extern crate futures;
+extern crate rusoto_core;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+
+mod generated;
+mod custom;
+
+pub use crate::generated::*;
+pub use crate::custom::*;
+
diff --git a/rusoto/services/transcribe/Cargo.toml b/rusoto/services/transcribe/Cargo.toml
index 52a11589a74..ced3b5a0aa5 100644
--- a/rusoto/services/transcribe/Cargo.toml
+++ b/rusoto/services/transcribe/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_transcribe"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/transcribe/README.md b/rusoto/services/transcribe/README.md
index a925cca85b7..b109c497e24 100644
--- a/rusoto/services/transcribe/README.md
+++ b/rusoto/services/transcribe/README.md
@@ -23,9 +23,16 @@ To use `rusoto_transcribe` in your application, add it as a dependency in your `Cargo.toml`:
 
 ```toml
 [dependencies]
-rusoto_transcribe = "0.40.0"
+rusoto_transcribe = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/transcribe/src/custom/mod.rs b/rusoto/services/transcribe/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/transcribe/src/custom/mod.rs
+++ b/rusoto/services/transcribe/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/transcribe/src/generated.rs b/rusoto/services/transcribe/src/generated.rs
index 52169b16e94..0e33a976987 100644
--- a/rusoto/services/transcribe/src/generated.rs
+++ b/rusoto/services/transcribe/src/generated.rs
@@ -9,17 +9,16 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
@@ -43,7 +42,7 @@ pub struct CreateVocabularyRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct CreateVocabularyResponse {
     ///

If the VocabularyState field is FAILED, this field contains information about why the job failed.

#[serde(rename = "FailureReason")] @@ -89,7 +88,7 @@ pub struct GetTranscriptionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTranscriptionJobResponse { ///

An object that contains the results of the transcription job.

#[serde(rename = "TranscriptionJob")] @@ -105,7 +104,7 @@ pub struct GetVocabularyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetVocabularyResponse { ///

The S3 location where the vocabulary is stored. Use this URI to get the contents of the vocabulary. The URI is available for a limited time.

#[serde(rename = "DownloadUri")] @@ -154,7 +153,7 @@ pub struct ListTranscriptionJobsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListTranscriptionJobsResponse { ///

The ListTranscriptionJobs operation returns a page of jobs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the NextToken value. Include the token in the next request to the ListTranscriptionJobs operation to return the next page of jobs.

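A minimal sketch of the pagination loop this field enables; the region is an assumption, and `.sync()` is used only to drive the futures to completion.

```rust
use rusoto_core::Region;
use rusoto_transcribe::{ListTranscriptionJobsRequest, Transcribe, TranscribeClient};

fn main() {
    let client = TranscribeClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let response = client
            .list_transcription_jobs(ListTranscriptionJobsRequest {
                max_results: Some(100),
                next_token: next_token.clone(),
                ..Default::default()
            })
            .sync()
            .expect("ListTranscriptionJobs failed");

        for job in response.transcription_job_summaries.unwrap_or_default() {
            println!(
                "{:?}: {:?}",
                job.transcription_job_name, job.transcription_job_status
            );
        }

        // No NextToken in the response means this was the last page.
        if response.next_token.is_none() {
            break;
        }
        next_token = response.next_token;
    }
}
```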
#[serde(rename = "NextToken")] @@ -191,7 +190,7 @@ pub struct ListVocabulariesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListVocabulariesResponse { ///

The ListVocabularies operation returns a page of vocabularies at a time. The maximum size of the page is set by the MaxResults parameter. If there are more vocabularies in the list than the page size, Amazon Transcribe returns the NextToken value. Include the token in the next request to the ListVocabularies operation to return the next page of vocabularies.

#[serde(rename = "NextToken")] @@ -245,14 +244,15 @@ pub struct StartTranscriptionJobRequest { ///

An object that describes the input media for a transcription job.

#[serde(rename = "Media")] pub media: Media, - ///

The format of the input media file.

+ ///

The format of the input media file.

If you do not specify the format of the media file, Amazon Transcribe determines the format. If the format is not recognized, Amazon Transcribe returns an InternalFailureException exception. If you specify the format, it must match the format detected by Amazon Transcribe, otherwise you get an InternalFailureException exception.

#[serde(rename = "MediaFormat")] - pub media_format: String, - ///

The sample rate, in Hertz, of the audio track in the input media file.

+ #[serde(skip_serializing_if = "Option::is_none")] + pub media_format: Option, + ///

The sample rate of the audio track in the input media file in Hertz.

If you do not specify the media sample rate, Amazon Transcribe determines the sample rate. If you specify the sample rate, it must match the sample rate detected by Amazon Transcribe. In most cases, you should leave the MediaSampleRateHertz field blank and let Amazon Transcribe determine the sample rate.

#[serde(rename = "MediaSampleRateHertz")] #[serde(skip_serializing_if = "Option::is_none")] pub media_sample_rate_hertz: Option, - ///

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket. You can't specify your own encryption key.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

+ ///

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket. You can't specify your own encryption key.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

#[serde(rename = "OutputBucketName")] #[serde(skip_serializing_if = "Option::is_none")] pub output_bucket_name: Option, @@ -266,7 +266,7 @@ pub struct StartTranscriptionJobRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartTranscriptionJobResponse { ///

An object containing details of the asynchronous transcription job.

#[serde(rename = "TranscriptionJob")] @@ -276,7 +276,7 @@ pub struct StartTranscriptionJobResponse { ///

Identifies the location of a transcription.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Transcript {
     ///

The location where the transcription is stored.

Use this URI to access the transcription. If you specified an S3 bucket in the OutputBucketName field when you created the job, this is the URI of that bucket. If you chose to store the transcription in Amazon Transcribe, this is a shareable URL that provides secure access to that location.

#[serde(rename = "TranscriptFileUri")] @@ -286,7 +286,7 @@ pub struct Transcript { ///

Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TranscriptionJob {
     ///

A timestamp that shows when the job was completed.

#[serde(rename = "CompletionTime")] @@ -336,7 +336,7 @@ pub struct TranscriptionJob { ///

Provides a summary of information about a transcription job.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TranscriptionJobSummary {
     ///

A timestamp that shows when the job was completed.

#[serde(rename = "CompletionTime")] @@ -387,7 +387,7 @@ pub struct UpdateVocabularyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateVocabularyResponse { ///

The language code of the vocabulary entries.

#[serde(rename = "LanguageCode")] @@ -409,7 +409,7 @@ pub struct UpdateVocabularyResponse { ///

Provides information about a custom vocabulary.

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct VocabularyInfo {
     ///

The language code of the vocabulary entries.

#[serde(rename = "LanguageCode")] @@ -952,10 +952,7 @@ impl TranscribeClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> TranscribeClient { - TranscribeClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -969,10 +966,14 @@ impl TranscribeClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - TranscribeClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> TranscribeClient { + TranscribeClient { client, region } } } diff --git a/rusoto/services/transfer/Cargo.toml b/rusoto/services/transfer/Cargo.toml new file mode 100644 index 00000000000..21f8d006d48 --- /dev/null +++ b/rusoto/services/transfer/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["Anthony DiMarco ", "Jimmy Cuadra ", "Matthew Mayer ", "Nikita Pekin "] +description = "AWS SDK for Rust - AWS Transfer for SFTP @ 2018-11-05" +documentation = "https://docs.rs/rusoto_transfer" +keywords = ["AWS", "Amazon", "transfer"] +license = "MIT" +name = "rusoto_transfer" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" +exclude = ["test_resources/*"] + +[build-dependencies] + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +serde = "1.0.2" +serde_derive = "1.0.2" +serde_json = "1.0.1" + +[dependencies.rusoto_core] +version = "0.41.0" +path = "../../core" +default-features = false +[dev-dependencies.rusoto_mock] +version = "0.41.0" +path = "../../../mock" +default-features = false + +[features] +default = ["native-tls"] +native-tls = ["rusoto_core/native-tls"] +rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/transfer/README.md b/rusoto/services/transfer/README.md new file mode 100644 index 00000000000..056cfddaf57 --- /dev/null +++ b/rusoto/services/transfer/README.md @@ -0,0 +1,52 @@ + +# Rusoto Transfer +Rust SDK for AWS Transfer for SFTP + +You may be looking for: + +* [An overview of Rusoto][rusoto-overview] +* [AWS services supported by Rusoto][supported-aws-services] +* [API documentation][api-documentation] +* [Getting help with Rusoto][rusoto-help] + +## Requirements + +Rust stable or beta are required to use Rusoto. Nightly is tested, but not guaranteed to be supported. Older +versions _may_ be supported. The currently supported Rust versions can be found in the Rusoto project +[`travis.yml`](https://github.com/rusoto/rusoto/blob/master/.travis.yml). + +On Linux, OpenSSL is required. + +## Installation + +To use `rusoto_transfer` in your application, add it as a dependency in your `Cargo.toml`: + +```toml +[dependencies] +rusoto_transfer = "0.41.0" +``` + +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + +## Contributing + +See [CONTRIBUTING][contributing]. + +## License + +Rusoto is distributed under the terms of the MIT license. + +See [LICENSE][license] for details. 
+ +[api-documentation]: https://docs.rs/rusoto_transfer "API documentation" +[license]: https://github.com/rusoto/rusoto/blob/master/LICENSE "MIT License" +[contributing]: https://github.com/rusoto/rusoto/blob/master/CONTRIBUTING.md "Contributing Guide" +[rusoto-help]: https://www.rusoto.org/help.html "Getting help with Rusoto" +[rusoto-overview]: https://www.rusoto.org/ "Rusoto overview" +[supported-aws-services]: https://www.rusoto.org/supported-aws-services.html "List of AWS services supported by Rusoto" + diff --git a/rusoto/services/transfer/src/custom/mod.rs b/rusoto/services/transfer/src/custom/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rusoto/services/transfer/src/generated.rs b/rusoto/services/transfer/src/generated.rs new file mode 100644 index 00000000000..4bcc04f0e10 --- /dev/null +++ b/rusoto/services/transfer/src/generated.rs @@ -0,0 +1,2164 @@ +// ================================================================= +// +// * WARNING * +// +// This file is generated! +// +// Changes made to this file will be overwritten. If changes are +// required to the generated code, the service_crategen project +// must be updated to generate the changes. +// +// ================================================================= +#![allow(warnings)] + +use futures::future; +use futures::Future; +use rusoto_core::credential::ProvideAwsCredentials; +use rusoto_core::region; +use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; +use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; + +use rusoto_core::proto; +use rusoto_core::signature::SignedRequest; +use serde_json; +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateServerRequest { + ///

The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. This parameter is required when you specify a value for the EndpointType parameter.

+ #[serde(rename = "EndpointDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_details: Option, + ///

The type of VPC endpoint that you want your SFTP server to connect to. If you connect to a VPC endpoint, your SFTP server isn't accessible over the public internet.

+ #[serde(rename = "EndpointType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_type: Option, + ///

The RSA private key as generated by the ssh-keygen -N "" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/change-host-key" in the AWS SFTP User Guide.

+ #[serde(rename = "HostKey")] + #[serde(skip_serializing_if = "Option::is_none")] + pub host_key: Option, + ///

This parameter is required when the IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. This property is not required when the IdentityProviderType is set to SERVICE_MANAGED.

+ #[serde(rename = "IdentityProviderDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_details: Option, + ///

Specifies the mode of authentication for the SFTP server. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

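A minimal sketch of creating a server that authenticates users through a custom identity provider, per the description above; the API Gateway URL and invocation role ARN are hypothetical placeholders.

```rust
use rusoto_core::Region;
use rusoto_transfer::{CreateServerRequest, IdentityProviderDetails, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    let response = client
        .create_server(CreateServerRequest {
            identity_provider_type: Some("API_GATEWAY".to_owned()),
            identity_provider_details: Some(IdentityProviderDetails {
                // Hypothetical API Gateway endpoint and invocation role.
                url: Some("https://abc123.execute-api.us-east-1.amazonaws.com/prod".to_owned()),
                invocation_role: Some("arn:aws:iam::123456789012:role/TransferInvoke".to_owned()),
            }),
            ..Default::default()
        })
        .sync()
        .expect("CreateServer failed");
    println!("ServerId: {}", response.server_id);
}
```

Omitting both identity-provider fields leaves the server in the default SERVICE_MANAGED mode.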
+ #[serde(rename = "IdentityProviderType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_type: Option, + ///

A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.

+ #[serde(rename = "LoggingRole")] + #[serde(skip_serializing_if = "Option::is_none")] + pub logging_role: Option, + ///

Key-value pairs that can be used to group and search for servers.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateServerResponse { + ///

The service-assigned ID of the SFTP server that is created.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CreateUserRequest { + ///

The landing directory (folder) for a user when they log in to the server using their SFTP client. An example is /home/username .

+ #[serde(rename = "HomeDirectory")] + #[serde(skip_serializing_if = "Option::is_none")] + pub home_directory: Option, + ///

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down).

For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) in the AWS Security Token Service API Reference.

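A minimal sketch of passing a scope-down policy as a JSON blob, as described above; the role ARN, server id, and policy contents are illustrative assumptions only.

```rust
use rusoto_core::Region;
use rusoto_transfer::{CreateUserRequest, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    // The policy is passed inline as JSON, not as a policy ARN.
    // The statement below is a hypothetical example using the Transfer variables.
    let policy = r#"{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Action": ["s3:GetObject", "s3:PutObject"],
        "Resource": "arn:aws:s3:::${Transfer:HomeBucket}${Transfer:HomeDirectory}/*"
      }]
    }"#;

    let response = client
        .create_user(CreateUserRequest {
            role: "arn:aws:iam::123456789012:role/SftpAccess".to_owned(), // hypothetical
            server_id: "s-0123456789abcdef0".to_owned(),                  // hypothetical
            user_name: "ana".to_owned(),
            home_directory: Some("/my-bucket/home/ana".to_owned()), // hypothetical
            policy: Some(policy.to_owned()),
            ..Default::default()
        })
        .sync()
        .expect("CreateUser failed");
    println!("created {} on {}", response.user_name, response.server_id);
}
```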
+ #[serde(rename = "Policy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub policy: Option, + ///

The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

+ #[serde(rename = "Role")] + pub role: String, + ///

A system-assigned unique identifier for an SFTP server instance. This is the specific SFTP server that you added your user to.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

The public portion of the Secure Shell (SSH) key used to authenticate the user to the SFTP server.

+ #[serde(rename = "SshPublicKeyBody")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ssh_public_key_body: Option, + ///

Key-value pairs that can be used to group and search for users. Tags are metadata attached to users for any purpose.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CreateUserResponse { + ///

The ID of the SFTP server that the user is attached to.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

A unique string that identifies a user account associated with an SFTP server.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteServerRequest { + ///

A unique system-assigned identifier for an SFTP server instance.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteSshPublicKeyRequest { + ///

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server instance that has the user assigned to it.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

A unique identifier used to reference your user's specific SSH key.

+ #[serde(rename = "SshPublicKeyId")] + pub ssh_public_key_id: String, + ///

A unique string that identifies a user whose public key is being deleted.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DeleteUserRequest { + ///

A system-assigned unique identifier for an SFTP server instance that has the user assigned to it.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

A unique string that identifies a user that is being deleted from the server.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeServerRequest { + ///

A system-assigned unique identifier for an SFTP server.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeServerResponse { + ///

An array containing the properties of the server with the ServerID you specified.

+ #[serde(rename = "Server")] + pub server: DescribedServer, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct DescribeUserRequest { + ///

A system-assigned unique identifier for an SFTP server that has this user assigned.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer for SFTP service and perform file transfer tasks.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribeUserResponse { + ///

A system-assigned unique identifier for an SFTP server that has this user assigned.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

An array containing the properties of the user account for the ServerID value that you specified.

+ #[serde(rename = "User")] + pub user: DescribedUser, +} + +///

Describes the properties of the server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribedServer { + ///

Specifies the unique Amazon Resource Name (ARN) for the server to be described.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

The virtual private cloud (VPC) endpoint settings that you configured for your SFTP server.

+ #[serde(rename = "EndpointDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_details: Option, + ///

The type of endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

+ #[serde(rename = "EndpointType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_type: Option, + ///

This value contains the message-digest algorithm (MD5) hash of the server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

+ #[serde(rename = "HostKeyFingerprint")] + #[serde(skip_serializing_if = "Option::is_none")] + pub host_key_fingerprint: Option, + ///

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of the server is SERVICE_MANAGED.

+ #[serde(rename = "IdentityProviderDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_details: Option, + ///

This property defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this server to store and access SFTP user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

+ #[serde(rename = "IdentityProviderType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_type: Option, + ///

This property is an AWS Identity and Access Management (IAM) entity that allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

+ #[serde(rename = "LoggingRole")] + #[serde(skip_serializing_if = "Option::is_none")] + pub logging_role: Option, + ///

This property is a unique system-assigned identifier for the SFTP server that you instantiate.

+ #[serde(rename = "ServerId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub server_id: Option, + ///

The condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, + ///

This property contains the key-value pairs that you can use to search for and group servers that were assigned to the server that was described.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

The number of users that are assigned to the SFTP server you specified with the ServerId.

+ #[serde(rename = "UserCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_count: Option, +} + +///

Returns properties of the user that you want to describe.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct DescribedUser { + ///

This property contains the unique Amazon Resource Name (ARN) for the user that was requested to be described.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

This property specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /bucket_name/home/username .

+ #[serde(rename = "HomeDirectory")] + #[serde(skip_serializing_if = "Option::is_none")] + pub home_directory: Option, + ///

Specifies the name of the policy in use for the described user.

+ #[serde(rename = "Policy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub policy: Option, + ///

This property specifies the IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

+ #[serde(rename = "Role")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role: Option, + ///

This property contains the public key portion of the Secure Shell (SSH) keys stored for the described user.

+ #[serde(rename = "SshPublicKeys")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ssh_public_keys: Option>, + ///

This property contains the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + ///

This property is the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your SFTP server.

+ #[serde(rename = "UserName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_name: Option, +} + +///

The configuration settings for the virtual private cloud (VPC) endpoint for your SFTP server.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EndpointDetails { + ///

The ID of the VPC endpoint.

+ #[serde(rename = "VpcEndpointId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub vpc_endpoint_id: Option, +} + +///

Returns information related to the type of user authentication that is in use for a server's users. A server can have only one method of authentication.

+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IdentityProviderDetails { + ///

The InvocationRole parameter provides the type of InvocationRole used to authenticate the user account.

+ #[serde(rename = "InvocationRole")] + #[serde(skip_serializing_if = "Option::is_none")] + pub invocation_role: Option, + ///

The Url parameter contains the location of the service endpoint used to authenticate users.

+ #[serde(rename = "Url")] + #[serde(skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ImportSshPublicKeyRequest { + ///

A system-assigned unique identifier for an SFTP server.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

The public key portion of an SSH key pair.

+ #[serde(rename = "SshPublicKeyBody")] + pub ssh_public_key_body: String, + ///

The name of the user account that is assigned to one or more servers.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +///

This response identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

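A minimal sketch of importing a public key for an existing user; the server id and (truncated) key material are hypothetical placeholders.

```rust
use rusoto_core::Region;
use rusoto_transfer::{ImportSshPublicKeyRequest, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    let response = client
        .import_ssh_public_key(ImportSshPublicKeyRequest {
            server_id: "s-0123456789abcdef0".to_owned(), // hypothetical
            user_name: "ana".to_owned(),
            // Truncated illustrative key body; pass the full public key in practice.
            ssh_public_key_body: "ssh-rsa AAAAB3Nza... ana@workstation".to_owned(),
        })
        .sync()
        .expect("ImportSshPublicKey failed");
    println!("imported key {}", response.ssh_public_key_id);
}
```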
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ImportSshPublicKeyResponse { + ///

A system-assigned unique identifier for an SFTP server.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

This identifier is the name given to a public key by the system that was imported.

+ #[serde(rename = "SshPublicKeyId")] + pub ssh_public_key_id: String, + ///

A user name assigned to the ServerID value that you specified.

+ #[serde(rename = "UserName")] + pub user_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListServersRequest { + ///

Specifies the number of servers to return as a response to the ListServers query.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.

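A minimal sketch of the NextToken loop this parameter supports; the region is an assumption.

```rust
use rusoto_core::Region;
use rusoto_transfer::{ListServersRequest, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;
    loop {
        let response = client
            .list_servers(ListServersRequest {
                max_results: Some(50),
                next_token: next_token.clone(),
            })
            .sync()
            .expect("ListServers failed");

        for server in response.servers {
            println!("{:?} ({:?})", server.server_id, server.state);
        }

        // No NextToken in the response means this was the last page.
        if response.next_token.is_none() {
            break;
        }
        next_token = response.next_token;
    }
}
```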
+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListServersResponse { + ///

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional servers.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

An array of servers that were listed.

+ #[serde(rename = "Servers")] + pub servers: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceRequest { + ///

Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an identifier for a specific AWS resource, such as a server, user, or role.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

Specifies the number of tags to return as a response to the ListTagsForResource request.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

When you request additional results from the ListTagsForResource operation, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceResponse { + ///

This value is the ARN you specified to list the tags of.

+ #[serde(rename = "Arn")] + #[serde(skip_serializing_if = "Option::is_none")] + pub arn: Option, + ///

When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.

+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListUsersRequest { + ///

Specifies the number of users to return as a response to the ListUsers request.

+ #[serde(rename = "MaxResults")] + #[serde(skip_serializing_if = "Option::is_none")] + pub max_results: Option, + ///

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users assigned to it.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListUsersResponse { + ///

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.

+ #[serde(rename = "NextToken")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, + ///

A system-assigned unique identifier for an SFTP server that the users are assigned to.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

Returns the user accounts and their properties for the ServerId value that you specify.

+ #[serde(rename = "Users")] + pub users: Vec, +} + +///

Returns properties of the server that was specified.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListedServer { + ///

The unique Amazon Resource Name (ARN) for the server to be listed.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

The type of VPC endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

+ #[serde(rename = "EndpointType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_type: Option, + ///

The authentication method used to validate a user for the server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

+ #[serde(rename = "IdentityProviderType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_type: Option, + ///

The AWS Identity and Access Management entity that allows the server to turn on Amazon CloudWatch logging.

+ #[serde(rename = "LoggingRole")] + #[serde(skip_serializing_if = "Option::is_none")] + pub logging_role: Option, + ///

This value is the unique system assigned identifier for the SFTP servers that were listed.

+ #[serde(rename = "ServerId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub server_id: Option, + ///

This property describes the condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

+ #[serde(rename = "State")] + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option, + ///

This property is a numeric value that indicates the number of users that are assigned to the SFTP server you specified with the ServerId.

+ #[serde(rename = "UserCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_count: Option, +} + +///

Returns properties of the user that you specify.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListedUser { + ///

This property is the unique Amazon Resource Name (ARN) for the user that you want to learn about.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

This value specifies the location that files are written to or read from an Amazon S3 bucket for the user you specify by their ARN.

+ #[serde(rename = "HomeDirectory")] + #[serde(skip_serializing_if = "Option::is_none")] + pub home_directory: Option, + ///

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows the SFTP server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

+ #[serde(rename = "Role")] + #[serde(skip_serializing_if = "Option::is_none")] + pub role: Option, + ///

This value is the number of SSH public keys stored for the user you specified.

+ #[serde(rename = "SshPublicKeyCount")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ssh_public_key_count: Option, + ///

The name of the user whose ARN was specified. User names are used for authentication purposes.

+ #[serde(rename = "UserName")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_name: Option, +} + +///

Provides information about the public Secure Shell (SSH) key that is associated with a user account for a specific server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific SFTP server.

+#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct SshPublicKey { + ///

The date that the public key was added to the user account.

+ #[serde(rename = "DateImported")] + pub date_imported: f64, + ///

The content of the SSH public key as specified by the PublicKeyId.

+ #[serde(rename = "SshPublicKeyBody")] + pub ssh_public_key_body: String, + ///

The SshPublicKeyId parameter contains the identifier of the public key.

+ #[serde(rename = "SshPublicKeyId")] + pub ssh_public_key_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StartServerRequest { + ///

A system-assigned unique identifier for an SFTP server that you start.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct StopServerRequest { + ///

A system-assigned unique identifier for an SFTP server that you stopped.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +///

Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.

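A minimal sketch of the grouping example above, applied through TagResource; the server ARN is a hypothetical placeholder.

```rust
use rusoto_core::Region;
use rusoto_transfer::{Tag, TagResourceRequest, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    client
        .tag_resource(TagResourceRequest {
            // Hypothetical server ARN.
            arn: "arn:aws:transfer:us-east-1:123456789012:server/s-0123456789abcdef0".to_owned(),
            tags: vec![
                Tag {
                    key: "Group".to_owned(),
                    value: "Research".to_owned(),
                },
                Tag {
                    key: "Group".to_owned(),
                    value: "Accounting".to_owned(),
                },
            ],
        })
        .sync()
        .expect("TagResource failed");
}
```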
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Tag { + ///

The name assigned to the tag that you create.

+ #[serde(rename = "Key")] + pub key: String, + ///

This property contains one or more values that you assigned to the key name you create.

+ #[serde(rename = "Value")] + pub value: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + ///

An Amazon Resource Name (ARN) for a specific AWS resource, such as a server, user, or role.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to user accounts for any purpose.

+ #[serde(rename = "Tags")] + pub tags: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TestIdentityProviderRequest { + ///

A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

+ #[serde(rename = "ServerId")] + pub server_id: String, + ///

This request parameter is the name of the user account to be tested.

+ #[serde(rename = "UserName")] + pub user_name: String, + ///

The password of the user account to be tested.

+ #[serde(rename = "UserPassword")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_password: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TestIdentityProviderResponse { + ///

A message that indicates whether the test was successful or not.

+ #[serde(rename = "Message")] + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + ///

The response that is returned from your API Gateway.

+ #[serde(rename = "Response")] + #[serde(skip_serializing_if = "Option::is_none")] + pub response: Option, + ///

The HTTP status code that is the response from your API Gateway.

+ #[serde(rename = "StatusCode")] + pub status_code: i64, + ///

The endpoint of the service used to authenticate a user.

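A minimal sketch of exercising a server's identity provider with a test user; the server id and credentials are illustrative assumptions.

```rust
use rusoto_core::Region;
use rusoto_transfer::{TestIdentityProviderRequest, Transfer, TransferClient};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    let response = client
        .test_identity_provider(TestIdentityProviderRequest {
            server_id: "s-0123456789abcdef0".to_owned(), // hypothetical
            user_name: "ana".to_owned(),                 // hypothetical
            user_password: Some("example-password".to_owned()),
        })
        .sync()
        .expect("TestIdentityProvider failed");
    // StatusCode and Url echo what the customer identity provider returned.
    println!("HTTP {} from {}", response.status_code, response.url);
}
```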
+ #[serde(rename = "Url")] + pub url: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + ///

This is the value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.

+ #[serde(rename = "Arn")] + pub arn: String, + ///

TagKeys are key-value pairs assigned to ARNs that can be used to group and search for resources by type. This metadata can be attached to resources for any purpose.

+ #[serde(rename = "TagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateServerRequest { + ///

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, your SFTP server isn't accessible over the public internet.

+ #[serde(rename = "EndpointDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_details: Option, + ///

The type of endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, your SFTP server isn't accessible over the public internet.

+ #[serde(rename = "EndpointType")] + #[serde(skip_serializing_if = "Option::is_none")] + pub endpoint_type: Option, + ///

The RSA private key as generated by ssh-keygen -N "" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" in the AWS SFTP User Guide.

+ #[serde(rename = "HostKey")] + #[serde(skip_serializing_if = "Option::is_none")] + pub host_key: Option, + ///

This response parameter is an array containing all of the information required to call a customer's authentication API method.

+ #[serde(rename = "IdentityProviderDetails")] + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_provider_details: Option, + ///

A value that changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.

+ #[serde(rename = "LoggingRole")] + #[serde(skip_serializing_if = "Option::is_none")] + pub logging_role: Option, + ///

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UpdateServerResponse { + ///

A system-assigned unique identifier for an SFTP server that the user account is assigned to.

+ #[serde(rename = "ServerId")] + pub server_id: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UpdateUserRequest { + ///

A parameter that specifies the landing directory (folder) for a user when they log in to the server using their client. An example is /home/username .

+ #[serde(rename = "HomeDirectory")] + #[serde(skip_serializing_if = "Option::is_none")] + pub home_directory: Option, + ///

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down).

For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) in the AWS Security Token Service API Reference.

+ #[serde(rename = "Policy")] + #[serde(skip_serializing_if = "Option::is_none")] + pub policy: Option, + ///

+    /// The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the Secure File Transfer Protocol (SFTP) server to access your resources when servicing your SFTP user's transfer requests.
+    #[serde(rename = "Role")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub role: Option<String>,

+    /// A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.
+    #[serde(rename = "ServerId")]
+    pub server_id: String,

+    /// A unique string that identifies a user and is associated with a server as specified by the ServerId. This is the string that will be used by your user when they log in to your SFTP server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.
+    #[serde(rename = "UserName")]
+    pub user_name: String,
+}
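The UserName rules above are easy to check before making a request; a small client-side sketch (the helper is ours, not part of the generated crate):

```rust
/// Mirrors the documented UserName rules: 3 to 32 characters drawn from
/// a-z, A-Z, 0-9, underscore, and hyphen, not starting with a hyphen.
fn is_valid_sftp_user_name(name: &str) -> bool {
    (3..=32).contains(&name.len())
        && !name.starts_with('-')
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-')
}
```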

+/// UpdateUserResponse returns the user name and server identifier for the request to update a user's properties.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct UpdateUserResponse {
+    /// A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.
+    #[serde(rename = "ServerId")]
+    pub server_id: String,
+    /// The unique identifier for a user that is assigned to the SFTP server instance that was specified in the request.
+    #[serde(rename = "UserName")]
+    pub user_name: String,
+}
+
+/// Errors returned by CreateServer
+#[derive(Debug, PartialEq)]
+pub enum CreateServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The requested resource already exists.
+    ResourceExists(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl CreateServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(CreateServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(CreateServerError::InvalidRequest(err.msg))
+                }
+                "ResourceExistsException" => {
+                    return RusotoError::Service(CreateServerError::ResourceExists(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(CreateServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateServerError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateServerError::InternalServiceError(ref cause) => cause,
+            CreateServerError::InvalidRequest(ref cause) => cause,
+            CreateServerError::ResourceExists(ref cause) => cause,
+            CreateServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by CreateUser
+#[derive(Debug, PartialEq)]
+pub enum CreateUserError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The requested resource already exists.
+    ResourceExists(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl CreateUserError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateUserError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(CreateUserError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(CreateUserError::InvalidRequest(err.msg))
+                }
+                "ResourceExistsException" => {
+                    return RusotoError::Service(CreateUserError::ResourceExists(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(CreateUserError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(CreateUserError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for CreateUserError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for CreateUserError {
+    fn description(&self) -> &str {
+        match *self {
+            CreateUserError::InternalServiceError(ref cause) => cause,
+            CreateUserError::InvalidRequest(ref cause) => cause,
+            CreateUserError::ResourceExists(ref cause) => cause,
+            CreateUserError::ResourceNotFound(ref cause) => cause,
+            CreateUserError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteServer
+#[derive(Debug, PartialEq)]
+pub enum DeleteServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl DeleteServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(DeleteServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(DeleteServerError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(DeleteServerError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(DeleteServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteServerError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteServerError::InternalServiceError(ref cause) => cause,
+            DeleteServerError::InvalidRequest(ref cause) => cause,
+            DeleteServerError::ResourceNotFound(ref cause) => cause,
+            DeleteServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteSshPublicKey
+#[derive(Debug, PartialEq)]
+pub enum DeleteSshPublicKeyError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl DeleteSshPublicKeyError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteSshPublicKeyError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(DeleteSshPublicKeyError::InternalServiceError(
+                        err.msg,
+                    ))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(DeleteSshPublicKeyError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(DeleteSshPublicKeyError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(DeleteSshPublicKeyError::ServiceUnavailable(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteSshPublicKeyError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteSshPublicKeyError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteSshPublicKeyError::InternalServiceError(ref cause) => cause,
+            DeleteSshPublicKeyError::InvalidRequest(ref cause) => cause,
+            DeleteSshPublicKeyError::ResourceNotFound(ref cause) => cause,
+            DeleteSshPublicKeyError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DeleteUser
+#[derive(Debug, PartialEq)]
+pub enum DeleteUserError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl DeleteUserError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteUserError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(DeleteUserError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(DeleteUserError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(DeleteUserError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(DeleteUserError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DeleteUserError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DeleteUserError {
+    fn description(&self) -> &str {
+        match *self {
+            DeleteUserError::InternalServiceError(ref cause) => cause,
+            DeleteUserError::InvalidRequest(ref cause) => cause,
+            DeleteUserError::ResourceNotFound(ref cause) => cause,
+            DeleteUserError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeServer
+#[derive(Debug, PartialEq)]
+pub enum DescribeServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl DescribeServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(DescribeServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(DescribeServerError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(DescribeServerError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(DescribeServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeServerError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeServerError::InternalServiceError(ref cause) => cause,
+            DescribeServerError::InvalidRequest(ref cause) => cause,
+            DescribeServerError::ResourceNotFound(ref cause) => cause,
+            DescribeServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by DescribeUser
+#[derive(Debug, PartialEq)]
+pub enum DescribeUserError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl DescribeUserError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeUserError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(DescribeUserError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(DescribeUserError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(DescribeUserError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(DescribeUserError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for DescribeUserError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for DescribeUserError {
+    fn description(&self) -> &str {
+        match *self {
+            DescribeUserError::InternalServiceError(ref cause) => cause,
+            DescribeUserError::InvalidRequest(ref cause) => cause,
+            DescribeUserError::ResourceNotFound(ref cause) => cause,
+            DescribeUserError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ImportSshPublicKey
+#[derive(Debug, PartialEq)]
+pub enum ImportSshPublicKeyError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The requested resource already exists.
+    ResourceExists(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl ImportSshPublicKeyError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ImportSshPublicKeyError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(ImportSshPublicKeyError::InternalServiceError(
+                        err.msg,
+                    ))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(ImportSshPublicKeyError::InvalidRequest(err.msg))
+                }
+                "ResourceExistsException" => {
+                    return RusotoError::Service(ImportSshPublicKeyError::ResourceExists(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(ImportSshPublicKeyError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(ImportSshPublicKeyError::ServiceUnavailable(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ImportSshPublicKeyError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ImportSshPublicKeyError {
+    fn description(&self) -> &str {
+        match *self {
+            ImportSshPublicKeyError::InternalServiceError(ref cause) => cause,
+            ImportSshPublicKeyError::InvalidRequest(ref cause) => cause,
+            ImportSshPublicKeyError::ResourceExists(ref cause) => cause,
+            ImportSshPublicKeyError::ResourceNotFound(ref cause) => cause,
+            ImportSshPublicKeyError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListServers
+#[derive(Debug, PartialEq)]
+pub enum ListServersError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// The NextToken parameter that was passed is invalid.
+    InvalidNextToken(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl ListServersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListServersError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(ListServersError::InternalServiceError(err.msg))
+                }
+                "InvalidNextTokenException" => {
+                    return RusotoError::Service(ListServersError::InvalidNextToken(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(ListServersError::InvalidRequest(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(ListServersError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListServersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListServersError {
+    fn description(&self) -> &str {
+        match *self {
+            ListServersError::InternalServiceError(ref cause) => cause,
+            ListServersError::InvalidNextToken(ref cause) => cause,
+            ListServersError::InvalidRequest(ref cause) => cause,
+            ListServersError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListTagsForResource
+#[derive(Debug, PartialEq)]
+pub enum ListTagsForResourceError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// The NextToken parameter that was passed is invalid.
+    InvalidNextToken(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl ListTagsForResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(ListTagsForResourceError::InternalServiceError(
+                        err.msg,
+                    ))
+                }
+                "InvalidNextTokenException" => {
+                    return RusotoError::Service(ListTagsForResourceError::InvalidNextToken(
+                        err.msg,
+                    ))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(ListTagsForResourceError::InvalidRequest(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(ListTagsForResourceError::ServiceUnavailable(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListTagsForResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListTagsForResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            ListTagsForResourceError::InternalServiceError(ref cause) => cause,
+            ListTagsForResourceError::InvalidNextToken(ref cause) => cause,
+            ListTagsForResourceError::InvalidRequest(ref cause) => cause,
+            ListTagsForResourceError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by ListUsers
+#[derive(Debug, PartialEq)]
+pub enum ListUsersError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// The NextToken parameter that was passed is invalid.
+    InvalidNextToken(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl ListUsersError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListUsersError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(ListUsersError::InternalServiceError(err.msg))
+                }
+                "InvalidNextTokenException" => {
+                    return RusotoError::Service(ListUsersError::InvalidNextToken(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(ListUsersError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(ListUsersError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(ListUsersError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for ListUsersError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for ListUsersError {
+    fn description(&self) -> &str {
+        match *self {
+            ListUsersError::InternalServiceError(ref cause) => cause,
+            ListUsersError::InvalidNextToken(ref cause) => cause,
+            ListUsersError::InvalidRequest(ref cause) => cause,
+            ListUsersError::ResourceNotFound(ref cause) => cause,
+            ListUsersError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by StartServer
+#[derive(Debug, PartialEq)]
+pub enum StartServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl StartServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<StartServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(StartServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(StartServerError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(StartServerError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(StartServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for StartServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for StartServerError {
+    fn description(&self) -> &str {
+        match *self {
+            StartServerError::InternalServiceError(ref cause) => cause,
+            StartServerError::InvalidRequest(ref cause) => cause,
+            StartServerError::ResourceNotFound(ref cause) => cause,
+            StartServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by StopServer
+#[derive(Debug, PartialEq)]
+pub enum StopServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl StopServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<StopServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(StopServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(StopServerError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(StopServerError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(StopServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for StopServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for StopServerError {
+    fn description(&self) -> &str {
+        match *self {
+            StopServerError::InternalServiceError(ref cause) => cause,
+            StopServerError::InvalidRequest(ref cause) => cause,
+            StopServerError::ResourceNotFound(ref cause) => cause,
+            StopServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by TagResource
+#[derive(Debug, PartialEq)]
+pub enum TagResourceError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl TagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(TagResourceError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(TagResourceError::InvalidRequest(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(TagResourceError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for TagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for TagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            TagResourceError::InternalServiceError(ref cause) => cause,
+            TagResourceError::InvalidRequest(ref cause) => cause,
+            TagResourceError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by TestIdentityProvider
+#[derive(Debug, PartialEq)]
+pub enum TestIdentityProviderError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl TestIdentityProviderError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TestIdentityProviderError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(TestIdentityProviderError::InternalServiceError(
+                        err.msg,
+                    ))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(TestIdentityProviderError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(TestIdentityProviderError::ResourceNotFound(
+                        err.msg,
+                    ))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(TestIdentityProviderError::ServiceUnavailable(
+                        err.msg,
+                    ))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for TestIdentityProviderError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for TestIdentityProviderError {
+    fn description(&self) -> &str {
+        match *self {
+            TestIdentityProviderError::InternalServiceError(ref cause) => cause,
+            TestIdentityProviderError::InvalidRequest(ref cause) => cause,
+            TestIdentityProviderError::ResourceNotFound(ref cause) => cause,
+            TestIdentityProviderError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UntagResource
+#[derive(Debug, PartialEq)]
+pub enum UntagResourceError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl UntagResourceError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(UntagResourceError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(UntagResourceError::InvalidRequest(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(UntagResourceError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UntagResourceError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UntagResourceError {
+    fn description(&self) -> &str {
+        match *self {
+            UntagResourceError::InternalServiceError(ref cause) => cause,
+            UntagResourceError::InvalidRequest(ref cause) => cause,
+            UntagResourceError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateServer
+#[derive(Debug, PartialEq)]
+pub enum UpdateServerError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl UpdateServerError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateServerError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(UpdateServerError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(UpdateServerError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(UpdateServerError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(UpdateServerError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateServerError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateServerError::InternalServiceError(ref cause) => cause,
+            UpdateServerError::InvalidRequest(ref cause) => cause,
+            UpdateServerError::ResourceNotFound(ref cause) => cause,
+            UpdateServerError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Errors returned by UpdateUser
+#[derive(Debug, PartialEq)]
+pub enum UpdateUserError {

+    /// This exception is thrown when an error occurs in the AWS Transfer for SFTP service.
+    InternalServiceError(String),
+    /// This exception is thrown when the client submits a malformed request.
+    InvalidRequest(String),
+    /// This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.
+    ResourceNotFound(String),
+    /// The request has failed because the AWS Transfer for SFTP service is not available.
+    ServiceUnavailable(String),
+}
+
+impl UpdateUserError {
+    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateUserError> {
+        if let Some(err) = proto::json::Error::parse(&res) {
+            match err.typ.as_str() {
+                "InternalServiceError" => {
+                    return RusotoError::Service(UpdateUserError::InternalServiceError(err.msg))
+                }
+                "InvalidRequestException" => {
+                    return RusotoError::Service(UpdateUserError::InvalidRequest(err.msg))
+                }
+                "ResourceNotFoundException" => {
+                    return RusotoError::Service(UpdateUserError::ResourceNotFound(err.msg))
+                }
+                "ServiceUnavailableException" => {
+                    return RusotoError::Service(UpdateUserError::ServiceUnavailable(err.msg))
+                }
+                "ValidationException" => return RusotoError::Validation(err.msg),
+                _ => {}
+            }
+        }
+        return RusotoError::Unknown(res);
+    }
+}
+impl fmt::Display for UpdateUserError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+impl Error for UpdateUserError {
+    fn description(&self) -> &str {
+        match *self {
+            UpdateUserError::InternalServiceError(ref cause) => cause,
+            UpdateUserError::InvalidRequest(ref cause) => cause,
+            UpdateUserError::ResourceNotFound(ref cause) => cause,
+            UpdateUserError::ServiceUnavailable(ref cause) => cause,
+        }
+    }
+}
+/// Trait representing the capabilities of the AWS Transfer API. AWS Transfer clients implement this trait.
+pub trait Transfer {

+    /// Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. When you make updates to your server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.
+    fn create_server(
+        &self,
+        input: CreateServerRequest,
+    ) -> RusotoFuture<CreateServerResponse, CreateServerError>;
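Given the error enums defined earlier, a caller can branch on the service variants of the returned `RusotoError`; a hedged sketch of a retry predicate for this operation (the helper and its policy are ours, not part of the generated code):

```rust
// Sketch: treat transient service conditions as retryable and everything
// else (bad input, unknown responses) as terminal.
fn is_retryable_create_server(err: &RusotoError<CreateServerError>) -> bool {
    match err {
        RusotoError::Service(CreateServerError::ServiceUnavailable(_))
        | RusotoError::Service(CreateServerError::InternalServiceError(_)) => true,
        _ => false,
    }
}
```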

+    /// Creates a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. You can only create and associate users with SFTP servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.
+    fn create_user(
+        &self,
+        input: CreateUserRequest,
+    ) -> RusotoFuture<CreateUserResponse, CreateUserError>;

+    /// Deletes the Secure File Transfer Protocol (SFTP) server that you specify.
+    ///
+    /// No response returns from this operation.
+    fn delete_server(&self, input: DeleteServerRequest) -> RusotoFuture<(), DeleteServerError>;

+    /// Deletes a user's Secure Shell (SSH) public key.
+    ///
+    /// No response is returned from this operation.
+    fn delete_ssh_public_key(
+        &self,
+        input: DeleteSshPublicKeyRequest,
+    ) -> RusotoFuture<(), DeleteSshPublicKeyError>;

+    /// Deletes the user belonging to the server you specify.
+    ///
+    /// No response returns from this operation.
+    ///
+    /// When you delete a user from a server, the user's information is lost.
+    fn delete_user(&self, input: DeleteUserRequest) -> RusotoFuture<(), DeleteUserError>;

+    /// Describes the server that you specify by passing the ServerId parameter.
+    ///
+    /// The response contains a description of the server's properties.
+    fn describe_server(
+        &self,
+        input: DescribeServerRequest,
+    ) -> RusotoFuture<DescribeServerResponse, DescribeServerError>;

+    /// Describes the user assigned to a specific server, as identified by its ServerId property.
+    ///
+    /// The response from this call returns the properties of the user associated with the ServerId value that was specified.
+    fn describe_user(
+        &self,
+        input: DescribeUserRequest,
+    ) -> RusotoFuture<DescribeUserResponse, DescribeUserError>;

+    /// Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to a specific server, identified by ServerId.
+    ///
+    /// The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.
+    fn import_ssh_public_key(
+        &self,
+        input: ImportSshPublicKeyRequest,
+    ) -> RusotoFuture<ImportSshPublicKeyResponse, ImportSshPublicKeyError>;
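As a usage illustration (the key path and identifiers are placeholders, and the blocking `.sync()` adapter assumes the caller is not already inside a tokio runtime):

```rust
// Sketch: read an OpenSSH-format public key from disk and attach it to an
// existing SFTP user.
fn import_key(client: &TransferClient) -> Result<(), Box<dyn std::error::Error>> {
    let key_body = std::fs::read_to_string("/home/me/.ssh/id_rsa.pub")?;
    let response = client
        .import_ssh_public_key(ImportSshPublicKeyRequest {
            server_id: "s-01234567890abcdef".to_string(),
            user_name: "example-user".to_string(),
            ssh_public_key_body: key_body.trim().to_string(),
        })
        .sync()?;
    println!("imported key id: {}", response.ssh_public_key_id);
    Ok(())
}
```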

+    /// Lists the Secure File Transfer Protocol (SFTP) servers that are associated with your AWS account.
+    fn list_servers(
+        &self,
+        input: ListServersRequest,
+    ) -> RusotoFuture<ListServersResponse, ListServersError>;

+    /// Lists all of the tags associated with the Amazon Resource Name (ARN) you specify. The resource can be a user, server, or role.
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError>;

+    /// Lists the users for the server that you specify by passing the ServerId parameter.
+    fn list_users(
+        &self,
+        input: ListUsersRequest,
+    ) -> RusotoFuture<ListUsersResponse, ListUsersError>;
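ListUsers is paginated through NextToken; a sketch of draining every page (the server id is a placeholder and `client` is assumed to be a `TransferClient`):

```rust
// Sketch: keep following next_token until the service stops returning one.
fn print_all_users(client: &TransferClient) -> Result<(), Box<dyn std::error::Error>> {
    let mut next_token: Option<String> = None;
    loop {
        let page = client
            .list_users(ListUsersRequest {
                server_id: "s-01234567890abcdef".to_string(),
                next_token: next_token.take(),
                ..Default::default()
            })
            .sync()?;
        for user in &page.users {
            println!("user: {:?}", user.user_name);
        }
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(())
}
```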

+    /// Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE. It has no impact on an SFTP server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.
+    ///
+    /// The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.
+    ///
+    /// No response is returned from this call.
+    fn start_server(&self, input: StartServerRequest) -> RusotoFuture<(), StartServerError>;

+    /// Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.
+    ///
+    /// The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.
+    ///
+    /// No response is returned from this call.
+    fn stop_server(&self, input: StopServerRequest) -> RusotoFuture<(), StopServerError>;

+    /// Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
+    ///
+    /// There is no response returned from this call.
+    fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError>;

+    /// If the IdentityProviderType of the server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.
+    fn test_identity_provider(
+        &self,
+        input: TestIdentityProviderRequest,
+    ) -> RusotoFuture<TestIdentityProviderResponse, TestIdentityProviderError>;
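A quick way to exercise this right after server creation; all identifiers and the password below are placeholders:

```rust
// Sketch: ask the service to call the custom identity provider and report
// what the API Gateway integration returned.
fn check_auth(client: &TransferClient) -> Result<(), Box<dyn std::error::Error>> {
    let outcome = client
        .test_identity_provider(TestIdentityProviderRequest {
            server_id: "s-01234567890abcdef".to_string(),
            user_name: "example-user".to_string(),
            user_password: Some("example-password".to_string()),
        })
        .sync()?;
    println!("status {}: {:?}", outcome.status_code, outcome.message);
    Ok(())
}
```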

+    /// Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
+    ///
+    /// No response is returned from this call.
+    fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError>;

+    /// Updates the properties of a server after it has been created.
+    ///
+    /// The UpdateServer call returns the ServerId of the Secure File Transfer Protocol (SFTP) server you updated.
+    fn update_server(
+        &self,
+        input: UpdateServerRequest,
+    ) -> RusotoFuture<UpdateServerResponse, UpdateServerError>;
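For instance, switching an existing server to a VPC endpoint; the endpoint type string, the server id, and the assumption that `UpdateServerRequest` derives `Default` are ours, for illustration only:

```rust
// Sketch: update a single property of an existing server. Fields left as
// None carry skip_serializing_if, so they are omitted from the request.
fn move_to_vpc(client: &TransferClient) -> Result<(), Box<dyn std::error::Error>> {
    let response = client
        .update_server(UpdateServerRequest {
            endpoint_type: Some("VPC_ENDPOINT".to_string()),
            server_id: "s-01234567890abcdef".to_string(),
            ..Default::default()
        })
        .sync()?;
    println!("updated server {}", response.server_id);
    Ok(())
}
```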

+    /// Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify.
+    ///
+    /// The response returns the ServerId and the UserName for the updated user.
+    fn update_user(
+        &self,
+        input: UpdateUserRequest,
+    ) -> RusotoFuture<UpdateUserResponse, UpdateUserError>;
+}
+/// A client for the AWS Transfer API.
+#[derive(Clone)]
+pub struct TransferClient {
+    client: Client,
+    region: region::Region,
+}
+
+impl TransferClient {
+    /// Creates a client backed by the default tokio event loop.
+    ///
+    /// The client will use the default credentials provider and tls client.
+    pub fn new(region: region::Region) -> TransferClient {
+        Self::new_with_client(Client::shared(), region)
+    }
+
+    pub fn new_with<P, D>(
+        request_dispatcher: D,
+        credentials_provider: P,
+        region: region::Region,
+    ) -> TransferClient
+    where
+        P: ProvideAwsCredentials + Send + Sync + 'static,
+        P::Future: Send,
+        D: DispatchSignedRequest + Send + Sync + 'static,
+        D::Future: Send,
+    {
+        Self::new_with_client(
+            Client::new_with(credentials_provider, request_dispatcher),
+            region,
+        )
+    }
+
+    pub fn new_with_client(client: Client, region: region::Region) -> TransferClient {
+        TransferClient { client, region }
+    }
+}
+
+impl Transfer for TransferClient {
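Before the trait implementation below, a minimal end-to-end sketch of the client; it assumes default credentials are resolvable and uses the blocking `.sync()` adapter:

```rust
use rusoto_core::Region;
use rusoto_transfer::{CreateServerRequest, Transfer, TransferClient};

fn main() {
    // Create a client on the default event loop, then stand up a
    // service-managed SFTP server and print its generated id.
    let client = TransferClient::new(Region::UsEast1);
    match client.create_server(CreateServerRequest::default()).sync() {
        Ok(response) => println!("created server {}", response.server_id),
        Err(err) => eprintln!("CreateServer failed: {}", err),
    }
}
```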

+    /// Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. When you make updates to your server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.
+    fn create_server(
+        &self,
+        input: CreateServerRequest,
+    ) -> RusotoFuture<CreateServerResponse, CreateServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.CreateServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateServerResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateServerError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Creates a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. You can only create and associate users with SFTP servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.
+    fn create_user(
+        &self,
+        input: CreateUserRequest,
+    ) -> RusotoFuture<CreateUserResponse, CreateUserError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.CreateUser");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<CreateUserResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(CreateUserError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Deletes the Secure File Transfer Protocol (SFTP) server that you specify.
+    ///
+    /// No response returns from this operation.
+    fn delete_server(&self, input: DeleteServerRequest) -> RusotoFuture<(), DeleteServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.DeleteServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteServerError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Deletes a user's Secure Shell (SSH) public key.
+    ///
+    /// No response is returned from this operation.
+    fn delete_ssh_public_key(
+        &self,
+        input: DeleteSshPublicKeyRequest,
+    ) -> RusotoFuture<(), DeleteSshPublicKeyError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.DeleteSshPublicKey");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteSshPublicKeyError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Deletes the user belonging to the server you specify.
+    ///
+    /// No response returns from this operation.
+    ///
+    /// When you delete a user from a server, the user's information is lost.
+    fn delete_user(&self, input: DeleteUserRequest) -> RusotoFuture<(), DeleteUserError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.DeleteUser");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DeleteUserError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Describes the server that you specify by passing the ServerId parameter.
+    ///
+    /// The response contains a description of the server's properties.
+    fn describe_server(
+        &self,
+        input: DescribeServerRequest,
+    ) -> RusotoFuture<DescribeServerResponse, DescribeServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.DescribeServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeServerResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeServerError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Describes the user assigned to a specific server, as identified by its ServerId property.
+    ///
+    /// The response from this call returns the properties of the user associated with the ServerId value that was specified.
+    fn describe_user(
+        &self,
+        input: DescribeUserRequest,
+    ) -> RusotoFuture<DescribeUserResponse, DescribeUserError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.DescribeUser");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<DescribeUserResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(DescribeUserError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to a specific server, identified by ServerId.
+    ///
+    /// The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.
+    fn import_ssh_public_key(
+        &self,
+        input: ImportSshPublicKeyRequest,
+    ) -> RusotoFuture<ImportSshPublicKeyResponse, ImportSshPublicKeyError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.ImportSshPublicKey");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ImportSshPublicKeyResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ImportSshPublicKeyError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Lists the Secure File Transfer Protocol (SFTP) servers that are associated with your AWS account.
+    fn list_servers(
+        &self,
+        input: ListServersRequest,
+    ) -> RusotoFuture<ListServersResponse, ListServersError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.ListServers");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListServersResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListServersError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Lists all of the tags associated with the Amazon Resource Name (ARN) you specify. The resource can be a user, server, or role.
+    fn list_tags_for_resource(
+        &self,
+        input: ListTagsForResourceRequest,
+    ) -> RusotoFuture<ListTagsForResourceResponse, ListTagsForResourceError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.ListTagsForResource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListTagsForResourceResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(ListTagsForResourceError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }

+    /// Lists the users for the server that you specify by passing the ServerId parameter.
+    fn list_users(
+        &self,
+        input: ListUsersRequest,
+    ) -> RusotoFuture<ListUsersResponse, ListUsersError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.ListUsers");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<ListUsersResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(ListUsersError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE. It has no impact on an SFTP server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.
+    ///
+    /// The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.
+    ///
+    /// No response is returned from this call.
+    fn start_server(&self, input: StartServerRequest) -> RusotoFuture<(), StartServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.StartServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(StartServerError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.
+    ///
+    /// The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.
+    ///
+    /// No response is returned from this call.
+    fn stop_server(&self, input: StopServerRequest) -> RusotoFuture<(), StopServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.StopServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(StopServerError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
+    ///
+    /// There is no response returned from this call.
+    fn tag_resource(&self, input: TagResourceRequest) -> RusotoFuture<(), TagResourceError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.TagResource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(TagResourceError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// If the IdentityProviderType of the server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.
+    fn test_identity_provider(
+        &self,
+        input: TestIdentityProviderRequest,
+    ) -> RusotoFuture<TestIdentityProviderResponse, TestIdentityProviderError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.TestIdentityProvider");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<TestIdentityProviderResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response.buffer().from_err().and_then(|response| {
+                        Err(TestIdentityProviderError::from_response(response))
+                    }),
+                )
+            }
+        })
+    }

+    /// Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
+    ///
+    /// No response is returned from this call.
+    fn untag_resource(&self, input: UntagResourceRequest) -> RusotoFuture<(), UntagResourceError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.UntagResource");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(future::ok(::std::mem::drop(response)))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UntagResourceError::from_response(response))),
+                )
+            }
+        })
+    }

+    /// <p>Updates the server properties after that server has been created.</p> <p>The UpdateServer call returns the ServerId of the Secure File Transfer Protocol (SFTP) server you updated.</p>
+    fn update_server(
+        &self,
+        input: UpdateServerRequest,
+    ) -> RusotoFuture<UpdateServerResponse, UpdateServerError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.UpdateServer");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateServerResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateServerError::from_response(response))),
+                )
+            }
+        })
+    }
+
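A sketch of a partial update under those semantics (assuming the generated request struct derives `Default`, which the surrounding generated code suggests, and that the response exposes `server_id`):

```rust
use rusoto_core::Region;
use rusoto_transfer::{Transfer, TransferClient, UpdateServerRequest};

fn main() {
    let client = TransferClient::new(Region::UsEast1);
    let request = UpdateServerRequest {
        server_id: "s-0123456789abcdef0".to_string(), // placeholder server ID
        // Leave every other field as None: UpdateServer only changes
        // the properties you actually set.
        ..Default::default()
    };

    match client.update_server(request).sync() {
        Ok(response) => println!("updated server {}", response.server_id),
        Err(e) => eprintln!("UpdateServer failed: {}", e),
    }
}
```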

+    /// <p>Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify.</p> <p>The response returns the ServerId and the UserName for the updated user.</p>
+    fn update_user(
+        &self,
+        input: UpdateUserRequest,
+    ) -> RusotoFuture<UpdateUserResponse, UpdateUserError> {
+        let mut request = SignedRequest::new("POST", "transfer", &self.region, "/");
+
+        request.set_content_type("application/x-amz-json-1.1".to_owned());
+        request.add_header("x-amz-target", "TransferService.UpdateUser");
+        let encoded = serde_json::to_string(&input).unwrap();
+        request.set_payload(Some(encoded));
+
+        self.client.sign_and_dispatch(request, |response| {
+            if response.status.is_success() {
+                Box::new(response.buffer().from_err().and_then(|response| {
+                    proto::json::ResponsePayload::new(&response)
+                        .deserialize::<UpdateUserResponse, _>()
+                }))
+            } else {
+                Box::new(
+                    response
+                        .buffer()
+                        .from_err()
+                        .and_then(|response| Err(UpdateUserError::from_response(response))),
+                )
+            }
+        })
+    }
+}
diff --git a/rusoto/services/transfer/src/lib.rs b/rusoto/services/transfer/src/lib.rs
new file mode 100644
index 00000000000..91ef45f39ef
--- /dev/null
+++ b/rusoto/services/transfer/src/lib.rs
@@ -0,0 +1,32 @@
+
+// =================================================================
+//
+//                            * WARNING *
+//
+//                    This file is generated!
+//
+//  Changes made to this file will be overwritten. If changes are
+//  required to the generated code, the service_crategen project
+//  must be updated to generate the changes.
+//
+// =================================================================
+
+#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
+//! <p>AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and set up.</p>
+//!
+//! If you're using the service, you're probably looking for [TransferClient](struct.TransferClient.html) and [Transfer](trait.Transfer.html).
+
+extern crate bytes;
+extern crate futures;
+extern crate rusoto_core;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+
+mod generated;
+mod custom;
+
+pub use crate::generated::*;
+pub use crate::custom::*;
+
diff --git a/rusoto/services/translate/Cargo.toml b/rusoto/services/translate/Cargo.toml
index fa59d0686a1..4f2ce34a2cd 100644
--- a/rusoto/services/translate/Cargo.toml
+++ b/rusoto/services/translate/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
 name = "rusoto_translate"
 readme = "README.md"
 repository = "https://github.com/rusoto/rusoto"
-version = "0.40.0"
+version = "0.41.0"
 homepage = "https://www.rusoto.org/"
 edition = "2018"
 exclude = ["test_resources/*"]
@@ -22,14 +22,16 @@ serde_derive = "1.0.2"
 serde_json = "1.0.1"
 
 [dependencies.rusoto_core]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../core"
 default-features = false
 
 [dev-dependencies.rusoto_mock]
-version = "0.40.0"
+version = "0.41.0"
 path = "../../../mock"
+default-features = false
 
 [features]
 default = ["native-tls"]
 native-tls = ["rusoto_core/native-tls"]
 rustls = ["rusoto_core/rustls"]
+serialize_structs = []
diff --git a/rusoto/services/translate/README.md b/rusoto/services/translate/README.md
index 6b97cabbe0b..acd92705b7d 100644
--- a/rusoto/services/translate/README.md
+++ b/rusoto/services/translate/README.md
@@ -23,9 +23,16 @@ To use `rusoto_translate` in your application, add it as a dependency in your `C
 
 ```toml
 [dependencies]
-rusoto_translate = "0.40.0"
+rusoto_translate = "0.41.0"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
diff --git a/rusoto/services/translate/src/custom/mod.rs b/rusoto/services/translate/src/custom/mod.rs
index e69de29bb2d..8b137891791 100644
--- a/rusoto/services/translate/src/custom/mod.rs
+++ b/rusoto/services/translate/src/custom/mod.rs
@@ -0,0 +1 @@
+
diff --git a/rusoto/services/translate/src/generated.rs b/rusoto/services/translate/src/generated.rs
index b6897d5462b..49d98725292 100644
--- a/rusoto/services/translate/src/generated.rs
+++ b/rusoto/services/translate/src/generated.rs
@@ -9,24 +9,23 @@
 // must be updated to generate the changes.
 //
 // =================================================================
+#![allow(warnings)]
 
-use std::error::Error;
-use std::fmt;
-
-#[allow(warnings)]
 use futures::future;
 use futures::Future;
 use rusoto_core::credential::ProvideAwsCredentials;
 use rusoto_core::region;
 use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
 use rusoto_core::{Client, RusotoError, RusotoFuture};
+use std::error::Error;
+use std::fmt;
 
 use rusoto_core::proto;
 use rusoto_core::signature::SignedRequest;
 use serde_json;
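The struct definitions that follow describe the terminology types. As a caller-side sketch (assuming the `Translate` trait and the `TranslateTextRequest` fields implied by this file; the terminology name is a placeholder):

```rust
use rusoto_core::Region;
use rusoto_translate::{Translate, TranslateClient, TranslateTextRequest};

fn main() {
    let client = TranslateClient::new(Region::UsEast1);
    let request = TranslateTextRequest {
        text: "Hello, world".to_string(),
        source_language_code: "en".to_string(),
        target_language_code: "de".to_string(),
        // At most one custom terminology can be applied per request.
        terminology_names: Some(vec!["my-terminology".to_string()]),
        ..Default::default()
    };

    match client.translate_text(request).sync() {
        Ok(response) => {
            println!("translated: {}", response.translated_text);
            // applied_terminologies is only present when terminology
            // was supplied in the request.
            println!("applied: {:?}", response.applied_terminologies);
        }
        Err(e) => eprintln!("TranslateText failed: {}", e),
    }
}
```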

 /// <p>The custom terminology applied to the input text by Amazon Translate for the translated text response. This is optional in the response and will only be present if you specified terminology input in the request. Currently, only one terminology can be applied per TranslateText request.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct AppliedTerminology {
     /// <p>The name of the custom terminology applied to the input text by Amazon Translate for the translated text response.</p>
     #[serde(rename = "Name")]
@@ -67,7 +66,7 @@ pub struct GetTerminologyRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetTerminologyResponse {
     /// <p>The data location of the custom terminology being retrieved. The custom terminology file is returned in a presigned url that has a 30 minute expiration.</p>
     #[serde(rename = "TerminologyDataLocation")]
@@ -101,7 +100,7 @@ pub struct ImportTerminologyRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ImportTerminologyResponse {
     /// <p>The properties of the custom terminology being imported.</p>
     #[serde(rename = "TerminologyProperties")]
@@ -122,7 +121,7 @@ pub struct ListTerminologiesRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListTerminologiesResponse {
     /// <p>If the response to the ListTerminologies was truncated, the NextToken fetches the next group of custom terminologies.</p>
     #[serde(rename = "NextToken")]
@@ -136,7 +135,7 @@ pub struct ListTerminologiesResponse {
 
 /// <p>The term being translated by the custom terminology.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct Term {
     /// <p>The source text of the term being translated by the custom terminology.</p>
     #[serde(rename = "SourceText")]
@@ -166,7 +165,7 @@ pub struct TerminologyData {
 
 /// <p>The location of the custom terminology data.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TerminologyDataLocation {
     /// <p>The location of the custom terminology data.</p>
     #[serde(rename = "Location")]
@@ -178,7 +177,7 @@ pub struct TerminologyDataLocation {
 
 /// <p>The properties of the custom terminology.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TerminologyProperties {
     /// <p>The Amazon Resource Name (ARN) of the custom terminology.</p>
     #[serde(rename = "Arn")]
@@ -240,7 +239,7 @@ pub struct TranslateTextRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct TranslateTextResponse {
     /// <p>The names of the custom terminologies applied to the input text by Amazon Translate for the translated text response.</p>

#[serde(rename = "AppliedTerminologies")] @@ -578,10 +577,7 @@ impl TranslateClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> TranslateClient { - TranslateClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -595,10 +591,14 @@ impl TranslateClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - TranslateClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> TranslateClient { + TranslateClient { client, region } } } diff --git a/rusoto/services/waf-regional/Cargo.toml b/rusoto/services/waf-regional/Cargo.toml index 8540833c2d4..9527b38cd1c 100644 --- a/rusoto/services/waf-regional/Cargo.toml +++ b/rusoto/services/waf-regional/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_waf_regional" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/waf-regional/README.md b/rusoto/services/waf-regional/README.md index 4ed9f96f667..afd51eabdcf 100644 --- a/rusoto/services/waf-regional/README.md +++ b/rusoto/services/waf-regional/README.md @@ -23,9 +23,16 @@ To use `rusoto_waf_regional` in your application, add it as a dependency in your ```toml [dependencies] -rusoto_waf_regional = "0.40.0" +rusoto_waf_regional = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/waf-regional/src/custom/mod.rs b/rusoto/services/waf-regional/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/waf-regional/src/custom/mod.rs +++ b/rusoto/services/waf-regional/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/waf-regional/src/generated.rs b/rusoto/services/waf-regional/src/generated.rs index 646e70f70dd..6afe7cb36c7 100644 --- a/rusoto/services/waf-regional/src/generated.rs +++ b/rusoto/services/waf-regional/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -62,12 +61,12 @@ pub struct AssociateWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateWebACLResponse {} ///

 /// <p>In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.</p> <p>A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ByteMatchSet {

The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet), insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).

ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

#[serde(rename = "ByteMatchSetId")] @@ -83,7 +82,7 @@ pub struct ByteMatchSet { ///

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ByteMatchSetSummary { ///

The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a Rule, and delete a ByteMatchSet from AWS WAF.

ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

#[serde(rename = "ByteMatchSetId")] @@ -137,7 +136,7 @@ pub struct CreateByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateByteMatchSetResponse { ///

A ByteMatchSet that contains no ByteMatchTuple objects.

#[serde(rename = "ByteMatchSet")] @@ -160,7 +159,7 @@ pub struct CreateGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGeoMatchSetResponse { ///

The ChangeToken that you used to submit the CreateGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -183,7 +182,7 @@ pub struct CreateIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIPSetResponse { ///

The ChangeToken that you used to submit the CreateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -212,10 +211,13 @@ pub struct CreateRateBasedRuleRequest { ///

The maximum number of requests, which have an identical value in the field that is specified by RateKey, allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.

#[serde(rename = "RateLimit")] pub rate_limit: i64, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRateBasedRuleResponse { ///

The ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -238,7 +240,7 @@ pub struct CreateRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRegexMatchSetResponse { ///

The ChangeToken that you used to submit the CreateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -261,7 +263,7 @@ pub struct CreateRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRegexPatternSetResponse { ///

The ChangeToken that you used to submit the CreateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -284,10 +286,13 @@ pub struct CreateRuleGroupRequest { ///

A friendly name or description of the RuleGroup. You can't change Name after you create a RuleGroup.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRuleGroupResponse { ///

The ChangeToken that you used to submit the CreateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -310,10 +315,13 @@ pub struct CreateRuleRequest { ///

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRuleResponse { ///

The ChangeToken that you used to submit the CreateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -336,7 +344,7 @@ pub struct CreateSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSizeConstraintSetResponse { ///

The ChangeToken that you used to submit the CreateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -361,7 +369,7 @@ pub struct CreateSqlInjectionMatchSetRequest { ///

The response to a CreateSqlInjectionMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -387,10 +395,13 @@ pub struct CreateWebACLRequest { ///

A friendly name or description of the WebACL. You can't change Name after you create the WebACL.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWebACLResponse { ///

The ChangeToken that you used to submit the CreateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -415,7 +426,7 @@ pub struct CreateXssMatchSetRequest { ///

The response to a CreateXssMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateXssMatchSetResponse { ///

The ChangeToken that you used to submit the CreateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -438,7 +449,7 @@ pub struct DeleteByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteByteMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -457,7 +468,7 @@ pub struct DeleteGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGeoMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -476,7 +487,7 @@ pub struct DeleteIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteIPSetResponse { ///

The ChangeToken that you used to submit the DeleteIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -492,7 +503,7 @@ pub struct DeleteLoggingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLoggingConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -503,7 +514,7 @@ pub struct DeletePermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePermissionPolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -517,7 +528,7 @@ pub struct DeleteRateBasedRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRateBasedRuleResponse { ///

The ChangeToken that you used to submit the DeleteRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -536,7 +547,7 @@ pub struct DeleteRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRegexMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -555,7 +566,7 @@ pub struct DeleteRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRegexPatternSetResponse { ///

The ChangeToken that you used to submit the DeleteRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -574,7 +585,7 @@ pub struct DeleteRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRuleGroupResponse { ///

The ChangeToken that you used to submit the DeleteRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -593,7 +604,7 @@ pub struct DeleteRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRuleResponse { ///

The ChangeToken that you used to submit the DeleteRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -612,7 +623,7 @@ pub struct DeleteSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSizeConstraintSetResponse { ///

The ChangeToken that you used to submit the DeleteSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -633,7 +644,7 @@ pub struct DeleteSqlInjectionMatchSetRequest { ///

The response to a request to delete a SqlInjectionMatchSet from AWS WAF.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -652,7 +663,7 @@ pub struct DeleteWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWebACLResponse { ///

The ChangeToken that you used to submit the DeleteWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -673,7 +684,7 @@ pub struct DeleteXssMatchSetRequest { ///

The response to a request to delete an XssMatchSet from AWS WAF.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteXssMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -689,7 +700,7 @@ pub struct DisassociateWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateWebACLResponse {} ///

The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup. The rule must belong to the RuleGroup that is specified by the ActivatedRule.

@@ -725,7 +736,7 @@ pub struct GeoMatchConstraint { ///

Contains one or more countries that AWS WAF will search for.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GeoMatchSet { ///

An array of GeoMatchConstraint objects, which contain the country that you want AWS WAF to search for.

#[serde(rename = "GeoMatchConstraints")] @@ -741,7 +752,7 @@ pub struct GeoMatchSet { ///

Contains the identifier and the name of the GeoMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GeoMatchSetSummary { ///

The GeoMatchSetId for an GeoMatchSet. You can use GeoMatchSetId in a GetGeoMatchSet request to get detailed information about an GeoMatchSet.

#[serde(rename = "GeoMatchSetId")] @@ -770,7 +781,7 @@ pub struct GetByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetByteMatchSetResponse { ///

     /// <p>Information about the ByteMatchSet that you specified in the GetByteMatchSet request. For more information, see the following topics:</p> <ul> <li> <p>ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name</p> </li> <li> <p>ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation</p> </li> <li> <p>FieldToMatch: Contains Data and Type</p> </li> </ul>
     #[serde(rename = "ByteMatchSet")]
@@ -782,7 +793,7 @@ pub struct GetByteMatchSetResponse {
 pub struct GetChangeTokenRequest {}
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetChangeTokenResponse {
     /// <p>The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus request to get the current status of the request.</p>
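Taken together, GetChangeToken, a mutating call, and GetChangeTokenStatus form a small protocol that nearly every write operation in this API follows. A sketch of the flow (request and field shapes as they appear in this file; the client and trait names `WAFRegionalClient`/`WAFRegional` are assumptions about this crate's exports):

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{
    CreateIPSetRequest, GetChangeTokenRequest, WAFRegional, WAFRegionalClient,
};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);

    // Step 1: obtain a change token.
    let token = client
        .get_change_token(GetChangeTokenRequest {})
        .sync()
        .expect("GetChangeToken failed")
        .change_token
        .expect("no change token in response");

    // Step 2: spend the token on a mutating call.
    let created = client
        .create_ip_set(CreateIPSetRequest {
            change_token: token,
            name: "my-ip-set".to_string(), // placeholder name
        })
        .sync()
        .expect("CreateIPSet failed");

    // Step 3: the returned token can be polled via GetChangeTokenStatus.
    println!("change token for status polling: {:?}", created.change_token);
}
```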

#[serde(rename = "ChangeToken")] @@ -798,7 +809,7 @@ pub struct GetChangeTokenStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetChangeTokenStatusResponse { ///

The status of the change token.

#[serde(rename = "ChangeTokenStatus")] @@ -814,7 +825,7 @@ pub struct GetGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGeoMatchSetResponse { ///

Information about the GeoMatchSet that you specified in the GetGeoMatchSet request. This includes the Type, which for a GeoMatchContraint is always Country, as well as the Value, which is the identifier for a specific country.

#[serde(rename = "GeoMatchSet")] @@ -830,7 +841,7 @@ pub struct GetIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIPSetResponse { ///

     /// <p>Information about the IPSet that you specified in the GetIPSet request. For more information, see the following topics:</p> <ul> <li> <p>IPSet: Contains IPSetDescriptors, IPSetId, and Name</p> </li> <li> <p>IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each IPSetDescriptor object contains Type and Value</p> </li> </ul>
     #[serde(rename = "IPSet")]
@@ -846,7 +857,7 @@ pub struct GetLoggingConfigurationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetLoggingConfigurationResponse {
     /// <p>The LoggingConfiguration for the specified web ACL.</p>

#[serde(rename = "LoggingConfiguration")] @@ -862,7 +873,7 @@ pub struct GetPermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPermissionPolicyResponse { ///

The IAM policy attached to the specified RuleGroup.

#[serde(rename = "Policy")] @@ -882,7 +893,7 @@ pub struct GetRateBasedRuleManagedKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRateBasedRuleManagedKeysResponse { ///

An array of IP addresses that currently are blocked by the specified RateBasedRule.

#[serde(rename = "ManagedKeys")] @@ -902,7 +913,7 @@ pub struct GetRateBasedRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRateBasedRuleResponse { ///

Information about the RateBasedRule that you specified in the GetRateBasedRule request.

#[serde(rename = "Rule")] @@ -918,7 +929,7 @@ pub struct GetRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegexMatchSetResponse { ///

Information about the RegexMatchSet that you specified in the GetRegexMatchSet request. For more information, see RegexMatchTuple.

#[serde(rename = "RegexMatchSet")] @@ -934,7 +945,7 @@ pub struct GetRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegexPatternSetResponse { ///

Information about the RegexPatternSet that you specified in the GetRegexPatternSet request, including the identifier of the pattern set and the regular expression patterns you want AWS WAF to search for.

#[serde(rename = "RegexPatternSet")] @@ -950,7 +961,7 @@ pub struct GetRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRuleGroupResponse { ///

Information about the RuleGroup that you specified in the GetRuleGroup request.

#[serde(rename = "RuleGroup")] @@ -966,7 +977,7 @@ pub struct GetRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRuleResponse { ///

     /// <p>Information about the Rule that you specified in the GetRule request. For more information, see the following topics:</p> <ul> <li> <p>Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId</p> </li> <li> <p>Predicate: Each Predicate object contains DataId, Negated, and Type</p> </li> </ul>
     #[serde(rename = "Rule")]
@@ -991,7 +1002,7 @@ pub struct GetSampledRequestsRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetSampledRequestsResponse {
     /// <p>The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems, the sample includes every request that your AWS resource received during the specified time range.</p>

#[serde(rename = "PopulationSize")] @@ -1015,7 +1026,7 @@ pub struct GetSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSizeConstraintSetResponse { ///

Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet request. For more information, see the following topics:

#[serde(rename = "SizeConstraintSet")] @@ -1033,7 +1044,7 @@ pub struct GetSqlInjectionMatchSetRequest { ///

The response to a GetSqlInjectionMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSqlInjectionMatchSetResponse { ///

Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet request. For more information, see the following topics:

#[serde(rename = "SqlInjectionMatchSet")] @@ -1049,7 +1060,7 @@ pub struct GetWebACLForResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetWebACLForResourceResponse { ///

Information about the web ACL that you specified in the GetWebACLForResource request. If there is no associated resource, a null WebACLSummary is returned.

#[serde(rename = "WebACLSummary")] @@ -1065,7 +1076,7 @@ pub struct GetWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetWebACLResponse { ///

     /// <p>Information about the WebACL that you specified in the GetWebACL request. For more information, see the following topics:</p> <ul> <li> <p>WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId</p> </li> <li> <p>DefaultAction (Data type is WafAction): Contains Type</p> </li> <li> <p>Rules: Contains an array of ActivatedRule objects, which contain Action, Priority, and RuleId</p> </li> <li> <p>Action: Contains Type</p> </li> </ul>
     #[serde(rename = "WebACL")]
@@ -1083,7 +1094,7 @@ pub struct GetXssMatchSetRequest {
 
 /// <p>The response to a GetXssMatchSet request.</p>

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct GetXssMatchSetResponse {
     /// <p>Information about the XssMatchSet that you specified in the GetXssMatchSet request. For more information, see the following topics:</p> <ul> <li> <p>XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple objects</p> </li> <li> <p>XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and TextTransformation</p> </li> <li> <p>FieldToMatch: Contains Data and Type</p> </li> </ul>
     #[serde(rename = "XssMatchSet")]
@@ -1093,7 +1104,7 @@ pub struct GetXssMatchSetResponse {
 
 /// <p>The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.</p>

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HTTPHeader { ///

The name of one of the headers in the sampled web request.

#[serde(rename = "Name")] @@ -1107,7 +1118,7 @@ pub struct HTTPHeader { ///

 /// <p>The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.</p>
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct HTTPRequest {
     /// <p>The IP address that the request originated from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:</p> <ul> <li> <p>c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request</p> </li> <li> <p>x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request</p> </li> </ul>
     #[serde(rename = "ClientIP")]
@@ -1137,7 +1148,7 @@ pub struct HTTPRequest {
 
 /// <p>Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.</p> <p>To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.</p>
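A sketch of feeding such a CIDR range into an IP set (assuming the `UpdateIPSet` operation and the `IPSetUpdate`/`IPSetDescriptor` shapes of this crate; the IDs and change token are placeholders obtained elsewhere):

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{
    IPSetDescriptor, IPSetUpdate, UpdateIPSetRequest, WAFRegional, WAFRegionalClient,
};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);
    let request = UpdateIPSetRequest {
        change_token: "abcd12f2-46da-4fdb-b8d5-fbd4c466928f".to_string(), // from GetChangeToken
        ip_set_id: "example1ds3t-46da-4fdb-b8d5-abc321j569j5".to_string(), // placeholder
        updates: vec![IPSetUpdate {
            action: "INSERT".to_string(),
            ip_set_descriptor: IPSetDescriptor {
                type_: "IPV4".to_string(),
                // A /24 covers the whole 192.0.2.0 - 192.0.2.255 range;
                // an individual address would use /32.
                value: "192.0.2.0/24".to_string(),
            },
        }],
    };

    match client.update_ip_set(request).sync() {
        Ok(response) => println!("change token: {:?}", response.change_token),
        Err(e) => eprintln!("UpdateIPSet failed: {}", e),
    }
}
```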

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct IPSet {
     /// <p>The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.</p>

#[serde(rename = "IPSetDescriptors")] @@ -1164,7 +1175,7 @@ pub struct IPSetDescriptor { ///

Contains the identifier and the name of the IPSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IPSetSummary { ///

The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get detailed information about an IPSet.

#[serde(rename = "IPSetId")] @@ -1202,7 +1213,7 @@ pub struct ListActivatedRulesInRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActivatedRulesInRuleGroupResponse { ///

An array of ActivatedRules objects.

#[serde(rename = "ActivatedRules")] @@ -1227,7 +1238,7 @@ pub struct ListByteMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListByteMatchSetsResponse { ///

An array of ByteMatchSetSummary objects.

#[serde(rename = "ByteMatchSets")] @@ -1252,7 +1263,7 @@ pub struct ListGeoMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGeoMatchSetsResponse { ///

An array of GeoMatchSetSummary objects.

#[serde(rename = "GeoMatchSets")] @@ -1277,7 +1288,7 @@ pub struct ListIPSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIPSetsResponse { ///

An array of IPSetSummary objects.

#[serde(rename = "IPSets")] @@ -1302,7 +1313,7 @@ pub struct ListLoggingConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLoggingConfigurationsResponse { ///

An array of LoggingConfiguration objects.

#[serde(rename = "LoggingConfigurations")] @@ -1327,7 +1338,7 @@ pub struct ListRateBasedRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRateBasedRulesResponse { ///

If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRateBasedRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.
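This NextMarker convention is shared by all of the List* operations, so a paging loop is the natural consumer. A sketch for ListRateBasedRules (request and response shapes assumed from this file, with `limit` taken to be `Option<i64>`):

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{ListRateBasedRulesRequest, WAFRegional, WAFRegionalClient};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);
    let mut next_marker: Option<String> = None;

    loop {
        let page = client
            .list_rate_based_rules(ListRateBasedRulesRequest {
                limit: Some(100),
                next_marker: next_marker.clone(),
            })
            .sync()
            .expect("ListRateBasedRules failed");

        for rule in page.rules.unwrap_or_default() {
            println!("{} ({})", rule.name, rule.rule_id);
        }

        // A missing NextMarker means this was the last page.
        match page.next_marker {
            Some(marker) => next_marker = Some(marker),
            None => break,
        }
    }
}
```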

#[serde(rename = "NextMarker")] @@ -1352,7 +1363,7 @@ pub struct ListRegexMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRegexMatchSetsResponse { ///

If you have more RegexMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RegexMatchSet objects, submit another ListRegexMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1377,7 +1388,7 @@ pub struct ListRegexPatternSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRegexPatternSetsResponse { ///

If you have more RegexPatternSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RegexPatternSet objects, submit another ListRegexPatternSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1401,7 +1412,7 @@ pub struct ListResourcesForWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourcesForWebACLResponse { ///

An array of ARNs (Amazon Resource Names) of the resources associated with the specified web ACL. An array with zero elements is returned if there are no resources associated with the web ACL.

#[serde(rename = "ResourceArns")] @@ -1422,7 +1433,7 @@ pub struct ListRuleGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRuleGroupsResponse { ///

If you have more RuleGroups than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RuleGroups, submit another ListRuleGroups request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1447,7 +1458,7 @@ pub struct ListRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRulesResponse { ///

If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1472,7 +1483,7 @@ pub struct ListSizeConstraintSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSizeConstraintSetsResponse { ///

If you have more SizeConstraintSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SizeConstraintSet objects, submit another ListSizeConstraintSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1499,7 +1510,7 @@ pub struct ListSqlInjectionMatchSetsRequest { ///

The response to a ListSqlInjectionMatchSets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSqlInjectionMatchSetsResponse { ///

If you have more SqlInjectionMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1524,7 +1535,7 @@ pub struct ListSubscribedRuleGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSubscribedRuleGroupsResponse { ///

     /// <p>If you have more objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more objects, submit another ListSubscribedRuleGroups request, and specify the NextMarker value from the response in the NextMarker value in the next request.</p>
     #[serde(rename = "NextMarker")]
@@ -1536,6 +1547,29 @@ pub struct ListSubscribedRuleGroupsResponse {
     pub rule_groups: Option<Vec<SubscribedRuleGroupSummary>>,
 }
 
+#[derive(Default, Debug, Clone, PartialEq, Serialize)]
+pub struct ListTagsForResourceRequest {
+    #[serde(rename = "Limit")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub limit: Option<i64>,
+    #[serde(rename = "NextMarker")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_marker: Option<String>,
+    #[serde(rename = "ResourceARN")]
+    pub resource_arn: String,
+}
+
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
+pub struct ListTagsForResourceResponse {
+    #[serde(rename = "NextMarker")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_marker: Option<String>,
+    #[serde(rename = "TagInfoForResource")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tag_info_for_resource: Option<TagInfoForResource>,
+}
+
 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct ListWebACLsRequest {
     /// <p>Specifies the number of WebACL objects that you want AWS WAF to return for this request. If you have more WebACL objects than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of WebACL objects.</p>

@@ -1549,7 +1583,7 @@ pub struct ListWebACLsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWebACLsResponse { ///

If you have more WebACL objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more WebACL objects, submit another ListWebACLs request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1576,7 +1610,7 @@ pub struct ListXssMatchSetsRequest { ///

The response to a ListXssMatchSets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListXssMatchSetsResponse { ///

If you have more XssMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more XssMatchSet objects, submit another ListXssMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1625,7 +1659,7 @@ pub struct PutLoggingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutLoggingConfigurationResponse { ///

The LoggingConfiguration that you submitted in the request.

#[serde(rename = "LoggingConfiguration")] @@ -1644,12 +1678,12 @@ pub struct PutPermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutPermissionPolicyResponse {} ///

 /// <p>A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:</p> <ul> <li> <p>The requests come from 192.0.2.44.</p> </li> <li> <p>They contain the value BadBot in the User-Agent header.</p> </li> </ul> <p>In the rule, you also define the rate limit as 15,000.</p> <p>Requests that meet both of these conditions and exceed 15,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.</p>
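A sketch of creating such a rule (request shape as added in this diff, with the new `tags` field left unset via `Default`; the change token is a placeholder from GetChangeToken):

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{CreateRateBasedRuleRequest, WAFRegional, WAFRegionalClient};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);
    let request = CreateRateBasedRuleRequest {
        change_token: "abcd12f2-46da-4fdb-b8d5-fbd4c466928f".to_string(), // placeholder
        metric_name: "BadBotBlocker".to_string(),
        name: "bad-bot-blocker".to_string(),
        // Requests are counted per originating IP address over a
        // five-minute window.
        rate_key: "IP".to_string(),
        rate_limit: 15_000,
        ..Default::default() // leaves the optional tags as None
    };

    match client.create_rate_based_rule(request).sync() {
        Ok(response) => println!("created rule: {:?}", response.rule),
        Err(e) => eprintln!("CreateRateBasedRule failed: {}", e),
    }
}
```

The predicates (the IPSet and ByteMatchSet conditions in the example above) are attached afterwards with UpdateRateBasedRule, using a fresh change token.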

 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct RateBasedRule {
     /// <p>The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a RateBasedRule.</p>

#[serde(rename = "MatchPredicates")] @@ -1675,7 +1709,7 @@ pub struct RateBasedRule { ///

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexMatchSet { ///

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

#[serde(rename = "Name")] @@ -1693,7 +1727,7 @@ pub struct RegexMatchSet { ///

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexMatchSetSummary { ///

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

#[serde(rename = "Name")] @@ -1730,7 +1764,7 @@ pub struct RegexMatchTuple { ///

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexPatternSet { ///

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

#[serde(rename = "Name")] @@ -1746,7 +1780,7 @@ pub struct RegexPatternSet { ///

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexPatternSetSummary { ///

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

#[serde(rename = "Name")] @@ -1769,7 +1803,7 @@ pub struct RegexPatternSetUpdate { ///

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44.

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

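The AND semantics described above can be made concrete with a small sketch (the types are illustrative stand-ins, not SDK types):

```rust
struct Request<'a> {
    source_ip: &'a str,
    user_agent: &'a str,
}

// A request matches the Rule only if *every* predicate matches.
fn matches_rule(req: &Request) -> bool {
    let from_listed_ip = req.source_ip == "192.0.2.44";
    let has_bad_bot_agent = req.user_agent.contains("BadBot");
    from_listed_ip && has_bad_bot_agent
}
```

By contrast, the Rules inside a WebACL are ORed: matching any one Rule is enough to trigger that Rule's action.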
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Rule { ///

A friendly name or description for the metrics for this Rule. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change MetricName after you create the Rule.

#[serde(rename = "MetricName")] @@ -1789,7 +1823,7 @@ pub struct Rule { ///

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleGroup { ///

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the RuleGroup.

#[serde(rename = "MetricName")] @@ -1806,7 +1840,7 @@ pub struct RuleGroup { ///

Contains the identifier and the friendly name or description of the RuleGroup.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleGroupSummary { ///

A friendly name or description of the RuleGroup. You can't change the name of a RuleGroup after you create it.

#[serde(rename = "Name")] @@ -1829,7 +1863,7 @@ pub struct RuleGroupUpdate { ///

Contains the identifier and the friendly name or description of the Rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleSummary { ///

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

#[serde(rename = "Name")] @@ -1852,7 +1886,7 @@ pub struct RuleUpdate { ///

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SampledHTTPRequest { ///

The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT.

#[serde(rename = "Action")] @@ -1893,7 +1927,7 @@ pub struct SizeConstraint { ///

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SizeConstraintSet { ///

The name, if any, of the SizeConstraintSet.

#[serde(rename = "Name")] @@ -1909,7 +1943,7 @@ pub struct SizeConstraintSet { ///

The Id and Name of a SizeConstraintSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SizeConstraintSetSummary { ///

The name of the SizeConstraintSet, if any.

#[serde(rename = "Name")] @@ -1932,7 +1966,7 @@ pub struct SizeConstraintSetUpdate { ///

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SqlInjectionMatchSet { ///

The name, if any, of the SqlInjectionMatchSet.

#[serde(rename = "Name")] @@ -1948,7 +1982,7 @@ pub struct SqlInjectionMatchSet { ///

The Id and Name of a SqlInjectionMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SqlInjectionMatchSetSummary { ///

The name of the SqlInjectionMatchSet, if any, specified by Id.

#[serde(rename = "Name")] @@ -1982,7 +2016,7 @@ pub struct SqlInjectionMatchTuple { ///

A summary of the rule groups you are subscribed to.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribedRuleGroupSummary { ///

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the RuleGroup.

#[serde(rename = "MetricName")] @@ -1995,6 +2029,39 @@ pub struct SubscribedRuleGroupSummary { pub rule_group_id: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Tag { + #[serde(rename = "Key")] + #[serde(skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(rename = "Value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagInfoForResource { + #[serde(rename = "ResourceARN")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_arn: Option, + #[serde(rename = "TagList")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_list: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + #[serde(rename = "ResourceARN")] + pub resource_arn: String, + #[serde(rename = "Tags")] + pub tags: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + ///

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

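A hedged sketch of building such a window (field names are taken from the TimeWindow struct above; rusoto models these timestamps as epoch seconds in an f64):

```rust
use rusoto_waf_regional::TimeWindow;
use std::time::{SystemTime, UNIX_EPOCH};

// Sample the last hour; AWS WAF will draw from at most the first 5,000
// requests received in this range.
fn last_hour() -> TimeWindow {
    let end_time = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the UNIX epoch")
        .as_secs() as f64;
    TimeWindow {
        start_time: end_time - 3600.0,
        end_time,
    }
}
```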
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TimeWindow { @@ -2006,6 +2073,18 @@ pub struct TimeWindow { pub start_time: f64, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + #[serde(rename = "ResourceARN")] + pub resource_arn: String, + #[serde(rename = "TagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateByteMatchSetRequest { ///

The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

@@ -2020,7 +2099,7 @@ pub struct UpdateByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateByteMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2042,7 +2121,7 @@ pub struct UpdateGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGeoMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2064,7 +2143,7 @@ pub struct UpdateIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIPSetResponse { ///

The ChangeToken that you used to submit the UpdateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2089,7 +2168,7 @@ pub struct UpdateRateBasedRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRateBasedRuleResponse { ///

The ChangeToken that you used to submit the UpdateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2111,7 +2190,7 @@ pub struct UpdateRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRegexMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2133,7 +2212,7 @@ pub struct UpdateRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRegexPatternSetResponse { ///

The ChangeToken that you used to submit the UpdateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2155,7 +2234,7 @@ pub struct UpdateRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRuleGroupResponse { ///

The ChangeToken that you used to submit the UpdateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2177,7 +2256,7 @@ pub struct UpdateRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRuleResponse { ///

The ChangeToken that you used to submit the UpdateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2199,7 +2278,7 @@ pub struct UpdateSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSizeConstraintSetResponse { ///

The ChangeToken that you used to submit the UpdateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2223,7 +2302,7 @@ pub struct UpdateSqlInjectionMatchSetRequest { ///

The response to an UpdateSqlInjectionMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2250,7 +2329,7 @@ pub struct UpdateWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateWebACLResponse { ///

The ChangeToken that you used to submit the UpdateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2274,7 +2353,7 @@ pub struct UpdateXssMatchSetRequest { ///

The response to an UpdateXssMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateXssMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2300,7 +2379,7 @@ pub struct WafOverrideAction { ///

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

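Putting the pieces together, a hedged sketch of a CreateWebACLRequest using the new optional Tags field (WafAction's `type_` field name is assumed from rusoto's usual renaming of the reserved word `type`):

```rust
use rusoto_waf_regional::{CreateWebACLRequest, Tag, WafAction};

fn build_create_web_acl(change_token: String) -> CreateWebACLRequest {
    CreateWebACLRequest {
        change_token, // from a prior get_change_token call
        default_action: WafAction {
            type_: "ALLOW".to_string(), // applied when no Rule matches
        },
        metric_name: "MyWebACL".to_string(),
        name: "my-web-acl".to_string(),
        tags: Some(vec![Tag {
            key: Some("env".to_string()),
            value: Some("prod".to_string()),
        }]),
    }
}
```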
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebACL { ///

The action to perform if none of the Rules contained in the WebACL match. The action is specified by the WafAction object.

#[serde(rename = "DefaultAction")] @@ -2327,7 +2406,7 @@ pub struct WebACL { ///

Contains the identifier and the name or description of the WebACL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebACLSummary { ///

A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

#[serde(rename = "Name")] @@ -2350,7 +2429,7 @@ pub struct WebACLUpdate { ///

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If an XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct XssMatchSet { ///

The name, if any, of the XssMatchSet.

#[serde(rename = "Name")] @@ -2366,7 +2445,7 @@ pub struct XssMatchSet { ///

The Id and Name of an XssMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct XssMatchSetSummary { ///

The name of the XssMatchSet, if any, specified by Id.

#[serde(rename = "Name")] @@ -2659,6 +2738,7 @@ impl Error for CreateIPSetError { /// Errors returned by CreateRateBasedRule #[derive(Debug, PartialEq)] pub enum CreateRateBasedRuleError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2669,12 +2749,19 @@ pub enum CreateRateBasedRuleError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

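Several error enums in this diff grow `WAFTagOperation` and `WAFTagOperationInternalError` variants (see the mapping just below), so callers that match exhaustively will need updating. A hedged sketch of one retry policy (the classification is an assumption, not SDK guidance):

```rust
use rusoto_core::RusotoError;
use rusoto_waf_regional::CreateRateBasedRuleError;

fn is_retryable(err: &RusotoError<CreateRateBasedRuleError>) -> bool {
    use CreateRateBasedRuleError::*;
    match err {
        // A stale change token can be refreshed via get_change_token and retried.
        RusotoError::Service(WAFStaleData(_)) => true,
        // The docs above mark internal errors as retryable.
        RusotoError::Service(WAFInternalError(_)) => true,
        RusotoError::Service(WAFTagOperationInternalError(_)) => true,
        _ => false,
    }
}
```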
WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateRateBasedRuleError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateRateBasedRuleError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateRateBasedRuleError::WAFDisallowedName( err.msg, @@ -2698,6 +2785,14 @@ impl CreateRateBasedRuleError { "WAFStaleDataException" => { return RusotoError::Service(CreateRateBasedRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateRateBasedRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + CreateRateBasedRuleError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2713,11 +2808,14 @@ impl fmt::Display for CreateRateBasedRuleError { impl Error for CreateRateBasedRuleError { fn description(&self) -> &str { match *self { + CreateRateBasedRuleError::WAFBadRequest(ref cause) => cause, CreateRateBasedRuleError::WAFDisallowedName(ref cause) => cause, CreateRateBasedRuleError::WAFInternalError(ref cause) => cause, CreateRateBasedRuleError::WAFInvalidParameter(ref cause) => cause, CreateRateBasedRuleError::WAFLimitsExceeded(ref cause) => cause, CreateRateBasedRuleError::WAFStaleData(ref cause) => cause, + CreateRateBasedRuleError::WAFTagOperation(ref cause) => cause, + CreateRateBasedRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -2838,6 +2936,7 @@ impl Error for CreateRegexPatternSetError { /// Errors returned by CreateRule #[derive(Debug, PartialEq)] pub enum CreateRuleError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2848,12 +2947,19 @@ pub enum CreateRuleError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateRuleError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateRuleError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateRuleError::WAFDisallowedName(err.msg)) } @@ -2869,6 +2975,14 @@ impl CreateRuleError { "WAFStaleDataException" => { return RusotoError::Service(CreateRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(CreateRuleError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2884,17 +2998,21 @@ impl fmt::Display for CreateRuleError { impl Error for CreateRuleError { fn description(&self) -> &str { match *self { + CreateRuleError::WAFBadRequest(ref cause) => cause, CreateRuleError::WAFDisallowedName(ref cause) => cause, CreateRuleError::WAFInternalError(ref cause) => cause, CreateRuleError::WAFInvalidParameter(ref cause) => cause, CreateRuleError::WAFLimitsExceeded(ref cause) => cause, CreateRuleError::WAFStaleData(ref cause) => cause, + CreateRuleError::WAFTagOperation(ref cause) => cause, + CreateRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } /// Errors returned by CreateRuleGroup #[derive(Debug, PartialEq)] pub enum CreateRuleGroupError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2903,12 +3021,19 @@ pub enum CreateRuleGroupError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateRuleGroupError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateRuleGroupError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateRuleGroupError::WAFDisallowedName(err.msg)) } @@ -2921,6 +3046,14 @@ impl CreateRuleGroupError { "WAFStaleDataException" => { return RusotoError::Service(CreateRuleGroupError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateRuleGroupError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + CreateRuleGroupError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2936,10 +3069,13 @@ impl fmt::Display for CreateRuleGroupError { impl Error for CreateRuleGroupError { fn description(&self) -> &str { match *self { + CreateRuleGroupError::WAFBadRequest(ref cause) => cause, CreateRuleGroupError::WAFDisallowedName(ref cause) => cause, CreateRuleGroupError::WAFInternalError(ref cause) => cause, CreateRuleGroupError::WAFLimitsExceeded(ref cause) => cause, CreateRuleGroupError::WAFStaleData(ref cause) => cause, + CreateRuleGroupError::WAFTagOperation(ref cause) => cause, + CreateRuleGroupError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3098,6 +3234,7 @@ impl Error for CreateSqlInjectionMatchSetError { /// Errors returned by CreateWebACL #[derive(Debug, PartialEq)] pub enum CreateWebACLError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -3110,12 +3247,19 @@ pub enum CreateWebACLError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateWebACLError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateWebACLError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateWebACLError::WAFDisallowedName(err.msg)) } @@ -3134,6 +3278,14 @@ impl CreateWebACLError { "WAFStaleDataException" => { return RusotoError::Service(CreateWebACLError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateWebACLError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(CreateWebACLError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3149,12 +3301,15 @@ impl fmt::Display for CreateWebACLError { impl Error for CreateWebACLError { fn description(&self) -> &str { match *self { + CreateWebACLError::WAFBadRequest(ref cause) => cause, CreateWebACLError::WAFDisallowedName(ref cause) => cause, CreateWebACLError::WAFInternalError(ref cause) => cause, CreateWebACLError::WAFInvalidAccount(ref cause) => cause, CreateWebACLError::WAFInvalidParameter(ref cause) => cause, CreateWebACLError::WAFLimitsExceeded(ref cause) => cause, CreateWebACLError::WAFStaleData(ref cause) => cause, + CreateWebACLError::WAFTagOperation(ref cause) => cause, + CreateWebACLError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3539,6 +3694,10 @@ pub enum DeleteRateBasedRuleError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRateBasedRuleError { @@ -3573,6 +3732,14 @@ impl DeleteRateBasedRuleError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRateBasedRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRateBasedRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + DeleteRateBasedRuleError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3594,6 +3761,8 @@ impl Error for DeleteRateBasedRuleError { DeleteRateBasedRuleError::WAFNonexistentItem(ref cause) => cause, DeleteRateBasedRuleError::WAFReferencedItem(ref cause) => cause, DeleteRateBasedRuleError::WAFStaleData(ref cause) => cause, + DeleteRateBasedRuleError::WAFTagOperation(ref cause) => cause, + DeleteRateBasedRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3758,6 +3927,10 @@ pub enum DeleteRuleError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRuleError { @@ -3782,6 +3955,14 @@ impl DeleteRuleError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(DeleteRuleError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3803,6 +3984,8 @@ impl Error for DeleteRuleError { DeleteRuleError::WAFNonexistentItem(ref cause) => cause, DeleteRuleError::WAFReferencedItem(ref cause) => cause, DeleteRuleError::WAFStaleData(ref cause) => cause, + DeleteRuleError::WAFTagOperation(ref cause) => cause, + DeleteRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3821,6 +4004,10 @@ pub enum DeleteRuleGroupError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRuleGroupError { @@ -3845,6 +4032,14 @@ impl DeleteRuleGroupError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRuleGroupError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRuleGroupError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + DeleteRuleGroupError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3866,6 +4061,8 @@ impl Error for DeleteRuleGroupError { DeleteRuleGroupError::WAFNonexistentItem(ref cause) => cause, DeleteRuleGroupError::WAFReferencedItem(ref cause) => cause, DeleteRuleGroupError::WAFStaleData(ref cause) => cause, + DeleteRuleGroupError::WAFTagOperation(ref cause) => cause, + DeleteRuleGroupError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -4036,6 +4233,10 @@ pub enum DeleteWebACLError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteWebACLError { @@ -4060,6 +4261,14 @@ impl DeleteWebACLError { "WAFStaleDataException" => { return RusotoError::Service(DeleteWebACLError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteWebACLError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(DeleteWebACLError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -4081,6 +4290,8 @@ impl Error for DeleteWebACLError { DeleteWebACLError::WAFNonexistentItem(ref cause) => cause, DeleteWebACLError::WAFReferencedItem(ref cause) => cause, DeleteWebACLError::WAFStaleData(ref cause) => cause, + DeleteWebACLError::WAFTagOperation(ref cause) => cause, + DeleteWebACLError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -5693,6 +5904,76 @@ impl Error for ListSubscribedRuleGroupsError { } } } +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.

+ WAFInvalidParameter(String), + ///

The operation failed because the referenced object doesn't exist.

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(ListTagsForResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(ListTagsForResourceError::WAFInternalError( + err.msg, + )) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(ListTagsForResourceError::WAFInvalidParameter( + err.msg, + )) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(ListTagsForResourceError::WAFNonexistentItem( + err.msg, + )) + } + "WAFTagOperationException" => { + return RusotoError::Service(ListTagsForResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + ListTagsForResourceError::WAFTagOperationInternalError(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::WAFBadRequest(ref cause) => cause, + ListTagsForResourceError::WAFInternalError(ref cause) => cause, + ListTagsForResourceError::WAFInvalidParameter(ref cause) => cause, + ListTagsForResourceError::WAFNonexistentItem(ref cause) => cause, + ListTagsForResourceError::WAFTagOperation(ref cause) => cause, + ListTagsForResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} /// Errors returned by ListWebACLs #[derive(Debug, PartialEq)] pub enum ListWebACLsError { @@ -5887,6 +6168,140 @@ impl Error for PutPermissionPolicyError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.

+ WAFInvalidParameter(String), + ///

The operation exceeds a resource limit, for example, the maximum number of WebACL objects that you can create for an AWS account. For more information, see Limits in the AWS WAF Developer Guide.

+ WAFLimitsExceeded(String), + ///

The operation failed because the referenced object doesn't exist.

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(TagResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(TagResourceError::WAFInternalError(err.msg)) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(TagResourceError::WAFInvalidParameter(err.msg)) + } + "WAFLimitsExceededException" => { + return RusotoError::Service(TagResourceError::WAFLimitsExceeded(err.msg)) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(TagResourceError::WAFNonexistentItem(err.msg)) + } + "WAFTagOperationException" => { + return RusotoError::Service(TagResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(TagResourceError::WAFTagOperationInternalError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::WAFBadRequest(ref cause) => cause, + TagResourceError::WAFInternalError(ref cause) => cause, + TagResourceError::WAFInvalidParameter(ref cause) => cause, + TagResourceError::WAFLimitsExceeded(ref cause) => cause, + TagResourceError::WAFNonexistentItem(ref cause) => cause, + TagResourceError::WAFTagOperation(ref cause) => cause, + TagResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.

+ WAFInvalidParameter(String), + ///

The operation failed because the referenced object doesn't exist.

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(UntagResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(UntagResourceError::WAFInternalError(err.msg)) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(UntagResourceError::WAFInvalidParameter(err.msg)) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(UntagResourceError::WAFNonexistentItem(err.msg)) + } + "WAFTagOperationException" => { + return RusotoError::Service(UntagResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(UntagResourceError::WAFTagOperationInternalError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::WAFBadRequest(ref cause) => cause, + UntagResourceError::WAFInternalError(ref cause) => cause, + UntagResourceError::WAFInvalidParameter(ref cause) => cause, + UntagResourceError::WAFNonexistentItem(ref cause) => cause, + UntagResourceError::WAFTagOperation(ref cause) => cause, + UntagResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} /// Errors returned by UpdateByteMatchSet #[derive(Debug, PartialEq)] pub enum UpdateByteMatchSetError { @@ -7296,6 +7711,11 @@ pub trait WAFRegional { input: ListSubscribedRuleGroupsRequest, ) -> RusotoFuture; + fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture; + ///

Returns an array of WebACLSummary objects in the response.

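The tagging surface declared below (`list_tags_for_resource`, `tag_resource`, `untag_resource`) can be exercised roughly like this; the ARN, region, and crate name are placeholders:

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{Tag, TagResourceRequest, WAFRegional, WAFRegionalClient};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);
    let request = TagResourceRequest {
        resource_arn: "arn:aws:waf-regional:us-east-1:123456789012:rule/example-id"
            .to_string(),
        tags: vec![Tag {
            key: Some("team".to_string()),
            value: Some("security".to_string()),
        }],
    };
    match client.tag_resource(request).sync() {
        Ok(_) => println!("resource tagged"),
        Err(err) => eprintln!("tag_resource failed: {}", err),
    }
}
```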
fn list_web_ac_ls( &self, @@ -7320,6 +7740,16 @@ pub trait WAFRegional { input: PutPermissionPolicyRequest, ) -> RusotoFuture; + fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture; + + fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture; + ///

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

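A hedged end-to-end sketch of the three steps above (GetChangeTokenRequest is not part of this hunk, so its shape is assumed; a real call must carry at least one update in `updates`):

```rust
use rusoto_core::Region;
use rusoto_waf_regional::{
    GetChangeTokenRequest, UpdateByteMatchSetRequest, WAFRegional, WAFRegionalClient,
};

fn main() {
    let client = WAFRegionalClient::new(Region::UsEast1);

    // Step 2: obtain a change token for the mutating call.
    let token = client
        .get_change_token(GetChangeTokenRequest {})
        .sync()
        .expect("get_change_token failed")
        .change_token
        .expect("no change token returned");

    // Step 3: submit the update. `updates` is left empty only because the
    // ByteMatchSetUpdate shape is not shown in this hunk; AWS WAF rejects
    // an empty update list.
    let request = UpdateByteMatchSetRequest {
        byte_match_set_id: "example-byte-match-set-id".to_string(),
        change_token: token,
        updates: vec![],
    };
    match client.update_byte_match_set(request).sync() {
        Ok(resp) => println!("submitted; token: {:?}", resp.change_token),
        Err(err) => eprintln!("update_byte_match_set failed: {}", err),
    }
}
```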
fn update_byte_match_set( &self, @@ -7404,10 +7834,7 @@ impl WAFRegionalClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WAFRegionalClient { - WAFRegionalClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7421,10 +7848,14 @@ impl WAFRegionalClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WAFRegionalClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WAFRegionalClient { + WAFRegionalClient { client, region } } } @@ -9237,6 +9668,36 @@ impl WAFRegional for WAFRegionalClient { }) } + fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf-regional", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header( + "x-amz-target", + "AWSWAF_Regional_20161128.ListTagsForResource", + ); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListTagsForResourceError::from_response(response)) + }), + ) + } + }) + } + ///

Returns an array of WebACLSummary objects in the response.

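The constructor refactor above also introduces `new_with_client`, which lets callers share one `rusoto_core::Client` (and with it the connection pool and credential chain) across several service clients. A sketch, assuming `Client` is cheaply cloneable as an Arc-backed handle:

```rust
use rusoto_core::{Client, Region};
use rusoto_waf_regional::WAFRegionalClient;

fn main() {
    // One shared HTTP client and credential provider...
    let shared = Client::shared();
    // ...reused by as many service clients as needed.
    let waf_east = WAFRegionalClient::new_with_client(shared.clone(), Region::UsEast1);
    let waf_west = WAFRegionalClient::new_with_client(shared, Region::UsWest2);
    let _ = (waf_east, waf_west);
}
```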
fn list_web_ac_ls( &self, @@ -9355,6 +9816,62 @@ impl WAFRegional for WAFRegionalClient { }) } + fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf-regional", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSWAF_Regional_20161128.TagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(TagResourceError::from_response(response))), + ) + } + }) + } + + fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf-regional", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSWAF_Regional_20161128.UntagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UntagResourceError::from_response(response))), + ) + } + }) + } + ///

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

fn update_byte_match_set( &self, diff --git a/rusoto/services/waf/Cargo.toml b/rusoto/services/waf/Cargo.toml index 5bda920b28e..28ac692065d 100644 --- a/rusoto/services/waf/Cargo.toml +++ b/rusoto/services/waf/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_waf" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/waf/README.md b/rusoto/services/waf/README.md index d17f868eaea..eec767cf8d8 100644 --- a/rusoto/services/waf/README.md +++ b/rusoto/services/waf/README.md @@ -23,9 +23,16 @@ To use `rusoto_waf` in your application, add it as a dependency in your `Cargo.t ```toml [dependencies] -rusoto_waf = "0.40.0" +rusoto_waf = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/waf/src/custom/mod.rs b/rusoto/services/waf/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/waf/src/custom/mod.rs +++ b/rusoto/services/waf/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/waf/src/generated.rs b/rusoto/services/waf/src/generated.rs index 5cde197f1cc..607e84f5128 100644 --- a/rusoto/services/waf/src/generated.rs +++ b/rusoto/services/waf/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -53,7 +52,7 @@ pub struct ActivatedRule { ///

In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ByteMatchSet { ///

The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet), insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).

ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

#[serde(rename = "ByteMatchSetId")] @@ -69,7 +68,7 @@ pub struct ByteMatchSet { ///

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ByteMatchSetSummary { ///

The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a Rule, and delete a ByteMatchSet from AWS WAF.

ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

#[serde(rename = "ByteMatchSetId")] @@ -123,7 +122,7 @@ pub struct CreateByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateByteMatchSetResponse { ///

A ByteMatchSet that contains no ByteMatchTuple objects.

#[serde(rename = "ByteMatchSet")] @@ -146,7 +145,7 @@ pub struct CreateGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGeoMatchSetResponse { ///

The ChangeToken that you used to submit the CreateGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -169,7 +168,7 @@ pub struct CreateIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIPSetResponse { ///

The ChangeToken that you used to submit the CreateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -198,10 +197,13 @@ pub struct CreateRateBasedRuleRequest { ///

The maximum number of requests with an identical value in the field specified by RateKey that are allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.

#[serde(rename = "RateLimit")] pub rate_limit: i64, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRateBasedRuleResponse { ///

The ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -224,7 +226,7 @@ pub struct CreateRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRegexMatchSetResponse { ///

The ChangeToken that you used to submit the CreateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -247,7 +249,7 @@ pub struct CreateRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRegexPatternSetResponse { ///

The ChangeToken that you used to submit the CreateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -270,10 +272,13 @@ pub struct CreateRuleGroupRequest { ///

A friendly name or description of the RuleGroup. You can't change Name after you create a RuleGroup.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRuleGroupResponse { ///

The ChangeToken that you used to submit the CreateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -296,10 +301,13 @@ pub struct CreateRuleRequest { ///

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateRuleResponse { ///

The ChangeToken that you used to submit the CreateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -322,7 +330,7 @@ pub struct CreateSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSizeConstraintSetResponse { ///

The ChangeToken that you used to submit the CreateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -347,7 +355,7 @@ pub struct CreateSqlInjectionMatchSetRequest { ///

The response to a CreateSqlInjectionMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -373,10 +381,13 @@ pub struct CreateWebACLRequest { ///

A friendly name or description of the WebACL. You can't change Name after you create the WebACL.

#[serde(rename = "Name")] pub name: String, + #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWebACLResponse { ///

The ChangeToken that you used to submit the CreateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
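
// ---- Editor's sketch (not part of the diff) --------------------------------
// CreateRateBasedRuleRequest, CreateRuleRequest, CreateRuleGroupRequest and
// CreateWebACLRequest all gain the same optional `tags` field in this diff.
// An illustrative construction (all field values are made up):
fn sketch_tagged_create_rule(change_token: String) -> CreateRuleRequest {
    CreateRuleRequest {
        change_token,
        metric_name: "MyRuleMetric".to_string(),
        name: "MyRule".to_string(),
        tags: Some(vec![Tag {
            key: Some("env".to_string()),
            value: Some("prod".to_string()),
        }]),
    }
}
// -----------------------------------------------------------------------------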

#[serde(rename = "ChangeToken")] @@ -401,7 +412,7 @@ pub struct CreateXssMatchSetRequest { ///

The response to a CreateXssMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateXssMatchSetResponse { ///

The ChangeToken that you used to submit the CreateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -424,7 +435,7 @@ pub struct DeleteByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteByteMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -443,7 +454,7 @@ pub struct DeleteGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGeoMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -462,7 +473,7 @@ pub struct DeleteIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteIPSetResponse { ///

The ChangeToken that you used to submit the DeleteIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -478,7 +489,7 @@ pub struct DeleteLoggingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLoggingConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -489,7 +500,7 @@ pub struct DeletePermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeletePermissionPolicyResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -503,7 +514,7 @@ pub struct DeleteRateBasedRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRateBasedRuleResponse { ///

The ChangeToken that you used to submit the DeleteRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -522,7 +533,7 @@ pub struct DeleteRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRegexMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -541,7 +552,7 @@ pub struct DeleteRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRegexPatternSetResponse { ///

The ChangeToken that you used to submit the DeleteRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -560,7 +571,7 @@ pub struct DeleteRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRuleGroupResponse { ///

The ChangeToken that you used to submit the DeleteRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -579,7 +590,7 @@ pub struct DeleteRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteRuleResponse { ///

The ChangeToken that you used to submit the DeleteRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -598,7 +609,7 @@ pub struct DeleteSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSizeConstraintSetResponse { ///

The ChangeToken that you used to submit the DeleteSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -619,7 +630,7 @@ pub struct DeleteSqlInjectionMatchSetRequest { ///

The response to a request to delete a SqlInjectionMatchSet from AWS WAF.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -638,7 +649,7 @@ pub struct DeleteWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWebACLResponse { ///

The ChangeToken that you used to submit the DeleteWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -659,7 +670,7 @@ pub struct DeleteXssMatchSetRequest { ///

The response to a request to delete an XssMatchSet from AWS WAF.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteXssMatchSetResponse { ///

The ChangeToken that you used to submit the DeleteXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
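
// ---- Editor's sketch (not part of the diff) --------------------------------
// Every Delete*Request above pairs a fresh change token with the id of the
// object to remove; an illustrative construction:
fn sketch_delete_ip_set(change_token: String, ip_set_id: String) -> DeleteIPSetRequest {
    DeleteIPSetRequest {
        change_token,
        ip_set_id,
    }
}
// -----------------------------------------------------------------------------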

#[serde(rename = "ChangeToken")] @@ -700,7 +711,7 @@ pub struct GeoMatchConstraint { ///

Contains one or more countries that AWS WAF will search for.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GeoMatchSet { ///

An array of GeoMatchConstraint objects, which contain the country that you want AWS WAF to search for.

#[serde(rename = "GeoMatchConstraints")] @@ -716,7 +727,7 @@ pub struct GeoMatchSet { ///

Contains the identifier and the name of the GeoMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GeoMatchSetSummary { ///

The GeoMatchSetId for an GeoMatchSet. You can use GeoMatchSetId in a GetGeoMatchSet request to get detailed information about an GeoMatchSet.

#[serde(rename = "GeoMatchSetId")] @@ -745,7 +756,7 @@ pub struct GetByteMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetByteMatchSetResponse { ///

Information about the ByteMatchSet that you specified in the GetByteMatchSet request. For more information, see the following topics:

  • ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name

  • ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation

  • FieldToMatch: Contains Data and Type

#[serde(rename = "ByteMatchSet")] @@ -757,7 +768,7 @@ pub struct GetByteMatchSetResponse { pub struct GetChangeTokenRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetChangeTokenResponse { ///

The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus request to get the current status of the request.

#[serde(rename = "ChangeToken")] @@ -773,7 +784,7 @@ pub struct GetChangeTokenStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetChangeTokenStatusResponse { ///

The status of the change token.
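
// ---- Editor's sketch (not part of the diff) --------------------------------
// GetChangeToken / GetChangeTokenStatus drive every mutating WAF call: fetch a
// token, pass it to a Create*/Update*/Delete* request, then poll the token's
// status. A minimal blocking sketch, assuming the Rusoto client pattern of
// this era (`WafClient`, the `Waf` trait in scope, and `RusotoFuture::sync()`):
fn sketch_change_token_roundtrip(client: &WafClient) -> Result<(), Box<dyn std::error::Error>> {
    let token = client
        .get_change_token(GetChangeTokenRequest {})
        .sync()?
        .change_token
        .expect("WAF returns a token");
    // ... submit `token` with a mutating request here ...
    let status = client
        .get_change_token_status(GetChangeTokenStatusRequest { change_token: token })
        .sync()?;
    println!("{:?}", status.change_token_status); // PROVISIONED, PENDING, or INSYNC
    Ok(())
}
// -----------------------------------------------------------------------------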

#[serde(rename = "ChangeTokenStatus")] @@ -789,7 +800,7 @@ pub struct GetGeoMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGeoMatchSetResponse { ///

Information about the GeoMatchSet that you specified in the GetGeoMatchSet request. This includes the Type, which for a GeoMatchContraint is always Country, as well as the Value, which is the identifier for a specific country.

#[serde(rename = "GeoMatchSet")] @@ -805,7 +816,7 @@ pub struct GetIPSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetIPSetResponse { ///

Information about the IPSet that you specified in the GetIPSet request. For more information, see the following topics:

  • IPSet: Contains IPSetDescriptors, IPSetId, and Name

  • IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each IPSetDescriptor object contains Type and Value

#[serde(rename = "IPSet")] @@ -821,7 +832,7 @@ pub struct GetLoggingConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetLoggingConfigurationResponse { ///

The LoggingConfiguration for the specified web ACL.

#[serde(rename = "LoggingConfiguration")] @@ -837,7 +848,7 @@ pub struct GetPermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetPermissionPolicyResponse { ///

The IAM policy attached to the specified RuleGroup.

#[serde(rename = "Policy")] @@ -857,7 +868,7 @@ pub struct GetRateBasedRuleManagedKeysRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRateBasedRuleManagedKeysResponse { ///

An array of IP addresses that currently are blocked by the specified RateBasedRule.

#[serde(rename = "ManagedKeys")] @@ -877,7 +888,7 @@ pub struct GetRateBasedRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRateBasedRuleResponse { ///

Information about the RateBasedRule that you specified in the GetRateBasedRule request.

#[serde(rename = "Rule")] @@ -893,7 +904,7 @@ pub struct GetRegexMatchSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegexMatchSetResponse { ///

Information about the RegexMatchSet that you specified in the GetRegexMatchSet request. For more information, see RegexMatchTuple.

#[serde(rename = "RegexMatchSet")] @@ -909,7 +920,7 @@ pub struct GetRegexPatternSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRegexPatternSetResponse { ///

Information about the RegexPatternSet that you specified in the GetRegexPatternSet request, including the identifier of the pattern set and the regular expression patterns you want AWS WAF to search for.

#[serde(rename = "RegexPatternSet")] @@ -925,7 +936,7 @@ pub struct GetRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRuleGroupResponse { ///

Information about the RuleGroup that you specified in the GetRuleGroup request.

#[serde(rename = "RuleGroup")] @@ -941,7 +952,7 @@ pub struct GetRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetRuleResponse { ///

Information about the Rule that you specified in the GetRule request. For more information, see the following topics:

  • Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId

  • Predicate: Each Predicate object contains DataId, Negated, and Type

#[serde(rename = "Rule")] @@ -966,7 +977,7 @@ pub struct GetSampledRequestsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSampledRequestsResponse { ///

The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems, the sample includes every request that your AWS resource received during the specified time range.
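
// ---- Editor's sketch (not part of the diff) --------------------------------
// GetSampledRequests samples at most 5,000 requests from the given window
// (see the TimeWindow doc later in this file); timestamps are plain f64 epoch
// seconds. Values below are illustrative:
fn sketch_sampled_requests(web_acl_id: String, rule_id: String) -> GetSampledRequestsRequest {
    GetSampledRequestsRequest {
        max_items: 100,
        rule_id,
        time_window: TimeWindow {
            start_time: 1_546_300_800.0, // 2019-01-01T00:00:00Z
            end_time: 1_546_304_400.0,   // one hour later
        },
        web_acl_id,
    }
}
// -----------------------------------------------------------------------------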

#[serde(rename = "PopulationSize")] @@ -990,7 +1001,7 @@ pub struct GetSizeConstraintSetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSizeConstraintSetResponse { ///

Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet request. For more information, see the following topics:

#[serde(rename = "SizeConstraintSet")] @@ -1008,7 +1019,7 @@ pub struct GetSqlInjectionMatchSetRequest { ///

The response to a GetSqlInjectionMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSqlInjectionMatchSetResponse { ///

Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet request. For more information, see the following topics:

#[serde(rename = "SqlInjectionMatchSet")] @@ -1024,7 +1035,7 @@ pub struct GetWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetWebACLResponse { ///

Information about the WebACL that you specified in the GetWebACL request. For more information, see the following topics:

  • WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId

  • DefaultAction (Data type is WafAction): Contains Type

  • Rules: Contains an array of ActivatedRule objects, which contain Action, Priority, and RuleId

  • Action: Contains Type

#[serde(rename = "WebACL")] @@ -1042,7 +1053,7 @@ pub struct GetXssMatchSetRequest { ///

The response to a GetXssMatchSet request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetXssMatchSetResponse { ///

Information about the XssMatchSet that you specified in the GetXssMatchSet request. For more information, see the following topics:

  • XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple objects

  • XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and TextTransformation

  • FieldToMatch: Contains Data and Type

#[serde(rename = "XssMatchSet")] @@ -1052,7 +1063,7 @@ pub struct GetXssMatchSetResponse { ///

The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HTTPHeader { ///

The name of one of the headers in the sampled web request.

#[serde(rename = "Name")] @@ -1066,7 +1077,7 @@ pub struct HTTPHeader { ///

The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HTTPRequest { ///

The IP address that the request originated from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

  • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request

  • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request

#[serde(rename = "ClientIP")] @@ -1096,7 +1107,7 @@ pub struct HTTPRequest { ///

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IPSet { ///

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.
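
// ---- Editor's sketch (not part of the diff) --------------------------------
// The CIDR rules above map directly onto IPSetDescriptor values; `type_` is
// the Rust-side spelling of the reserved word `Type`. Values illustrative:
fn sketch_ip_descriptors() -> Vec<IPSetDescriptor> {
    vec![
        IPSetDescriptor {
            type_: "IPV4".to_string(),
            value: "192.0.2.44/32".to_string(), // one IPv4 address
        },
        IPSetDescriptor {
            type_: "IPV6".to_string(),
            value: "2001:db8::/64".to_string(), // an IPv6 range
        },
    ]
}
// -----------------------------------------------------------------------------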

#[serde(rename = "IPSetDescriptors")] @@ -1123,7 +1134,7 @@ pub struct IPSetDescriptor { ///

Contains the identifier and the name of the IPSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct IPSetSummary { ///

The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get detailed information about an IPSet.

#[serde(rename = "IPSetId")] @@ -1161,7 +1172,7 @@ pub struct ListActivatedRulesInRuleGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListActivatedRulesInRuleGroupResponse { ///

An array of ActivatedRules objects.

#[serde(rename = "ActivatedRules")] @@ -1186,7 +1197,7 @@ pub struct ListByteMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListByteMatchSetsResponse { ///

An array of ByteMatchSetSummary objects.

#[serde(rename = "ByteMatchSets")] @@ -1211,7 +1222,7 @@ pub struct ListGeoMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGeoMatchSetsResponse { ///

An array of GeoMatchSetSummary objects.

#[serde(rename = "GeoMatchSets")] @@ -1236,7 +1247,7 @@ pub struct ListIPSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListIPSetsResponse { ///

An array of IPSetSummary objects.

#[serde(rename = "IPSets")] @@ -1261,7 +1272,7 @@ pub struct ListLoggingConfigurationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListLoggingConfigurationsResponse { ///

An array of LoggingConfiguration objects.

#[serde(rename = "LoggingConfigurations")] @@ -1286,7 +1297,7 @@ pub struct ListRateBasedRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRateBasedRulesResponse { ///

If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRateBasedRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1311,7 +1322,7 @@ pub struct ListRegexMatchSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRegexMatchSetsResponse { ///

If you have more RegexMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RegexMatchSet objects, submit another ListRegexMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1336,7 +1347,7 @@ pub struct ListRegexPatternSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRegexPatternSetsResponse { ///

If you have more RegexPatternSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RegexPatternSet objects, submit another ListRegexPatternSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1361,7 +1372,7 @@ pub struct ListRuleGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRuleGroupsResponse { ///

If you have more RuleGroups than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more RuleGroups, submit another ListRuleGroups request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1386,7 +1397,7 @@ pub struct ListRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListRulesResponse { ///

If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1411,7 +1422,7 @@ pub struct ListSizeConstraintSetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSizeConstraintSetsResponse { ///

If you have more SizeConstraintSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SizeConstraintSet objects, submit another ListSizeConstraintSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1438,7 +1449,7 @@ pub struct ListSqlInjectionMatchSetsRequest { ///

The response to a ListSqlInjectionMatchSets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSqlInjectionMatchSetsResponse { ///

If you have more SqlInjectionMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1463,7 +1474,7 @@ pub struct ListSubscribedRuleGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListSubscribedRuleGroupsResponse { ///

If you have more objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more objects, submit another ListSubscribedRuleGroups request, and specify the NextMarker value from the response in the NextMarker value in the next request.

#[serde(rename = "NextMarker")] @@ -1475,6 +1486,29 @@ pub struct ListSubscribedRuleGroupsResponse { pub rule_groups: Option>, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct ListTagsForResourceRequest { + #[serde(rename = "Limit")] + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + #[serde(rename = "NextMarker")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option, + #[serde(rename = "ResourceARN")] + pub resource_arn: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct ListTagsForResourceResponse { + #[serde(rename = "NextMarker")] + #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option, + #[serde(rename = "TagInfoForResource")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_info_for_resource: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListWebACLsRequest { ///

Specifies the number of WebACL objects that you want AWS WAF to return for this request. If you have more WebACL objects than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of WebACL objects.

@@ -1488,7 +1522,7 @@ pub struct ListWebACLsRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListWebACLsResponse {
     /// If you have more WebACL objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more WebACL objects, submit another ListWebACLs request, and specify the NextMarker value from the response in the NextMarker value in the next request.
     #[serde(rename = "NextMarker")]
@@ -1515,7 +1549,7 @@ pub struct ListXssMatchSetsRequest {
 /// The response to a ListXssMatchSets request.
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct ListXssMatchSetsResponse {
     /// If you have more XssMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more XssMatchSet objects, submit another ListXssMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.
     #[serde(rename = "NextMarker")]
@@ -1564,7 +1598,7 @@ pub struct PutLoggingConfigurationRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct PutLoggingConfigurationResponse {
     /// The LoggingConfiguration that you submitted in the request.

#[serde(rename = "LoggingConfiguration")] @@ -1583,12 +1617,12 @@ pub struct PutPermissionPolicyRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutPermissionPolicyResponse {} ///

A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:

  • The requests come from 192.0.2.44.

  • They contain the value BadBot in the User-Agent header.

In the rule, you also define the rate limit as 15,000.

Requests that meet both of these conditions and exceed 15,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RateBasedRule { ///

The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a RateBasedRule.
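
// ---- Editor's sketch (not part of the diff) --------------------------------
// Per the doc comment above, a RateBasedRule is an ordinary Rule plus a
// five-minute request-rate threshold. The 15,000-request example would be
// created with something like this (values illustrative; per the AWS docs,
// "IP" is currently the only valid RateKey):
fn sketch_rate_based_rule(change_token: String) -> CreateRateBasedRuleRequest {
    CreateRateBasedRuleRequest {
        change_token,
        metric_name: "BadBotBlocker".to_string(),
        name: "BadBotBlocker".to_string(),
        rate_key: "IP".to_string(),
        rate_limit: 15_000,
        tags: None,
    }
}
// -----------------------------------------------------------------------------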

#[serde(rename = "MatchPredicates")] @@ -1614,7 +1648,7 @@ pub struct RateBasedRule { ///

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexMatchSet { ///

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

#[serde(rename = "Name")] @@ -1632,7 +1666,7 @@ pub struct RegexMatchSet { ///

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexMatchSetSummary { ///

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

#[serde(rename = "Name")] @@ -1669,7 +1703,7 @@ pub struct RegexMatchTuple { ///

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexPatternSet { ///

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

#[serde(rename = "Name")] @@ -1685,7 +1719,7 @@ pub struct RegexPatternSet { ///

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegexPatternSetSummary { ///

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

#[serde(rename = "Name")] @@ -1708,7 +1742,7 @@ pub struct RegexPatternSetUpdate { ///

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Rule { ///

A friendly name or description for the metrics for this Rule. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change MetricName after you create the Rule.
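
// ---- Editor's sketch (not part of the diff) --------------------------------
// The two predicates from the Rule example above (requests from 192.0.2.44
// whose User-Agent is BadBot), expressed as Predicate values; the ids are
// placeholders for an existing IPSet and ByteMatchSet:
fn sketch_rule_predicates(ip_set_id: String, byte_match_set_id: String) -> Vec<Predicate> {
    vec![
        Predicate {
            data_id: ip_set_id, // the IPSet matching 192.0.2.44
            negated: false,
            type_: "IPMatch".to_string(),
        },
        Predicate {
            data_id: byte_match_set_id, // the ByteMatchSet matching BadBot
            negated: false,
            type_: "ByteMatch".to_string(),
        },
    ]
}
// -----------------------------------------------------------------------------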

#[serde(rename = "MetricName")] @@ -1728,7 +1762,7 @@ pub struct Rule { ///

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleGroup { ///

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the RuleGroup.

#[serde(rename = "MetricName")] @@ -1745,7 +1779,7 @@ pub struct RuleGroup { ///

Contains the identifier and the friendly name or description of the RuleGroup.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleGroupSummary { ///

A friendly name or description of the RuleGroup. You can't change the name of a RuleGroup after you create it.

#[serde(rename = "Name")] @@ -1768,7 +1802,7 @@ pub struct RuleGroupUpdate { ///

Contains the identifier and the friendly name or description of the Rule.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RuleSummary { ///

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

#[serde(rename = "Name")] @@ -1791,7 +1825,7 @@ pub struct RuleUpdate { ///

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SampledHTTPRequest { ///

The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT.

#[serde(rename = "Action")] @@ -1832,7 +1866,7 @@ pub struct SizeConstraint { ///

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SizeConstraintSet { ///

The name, if any, of the SizeConstraintSet.

#[serde(rename = "Name")] @@ -1848,7 +1882,7 @@ pub struct SizeConstraintSet { ///

The Id and Name of a SizeConstraintSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SizeConstraintSetSummary { ///

The name of the SizeConstraintSet, if any.

#[serde(rename = "Name")] @@ -1871,7 +1905,7 @@ pub struct SizeConstraintSetUpdate { ///

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SqlInjectionMatchSet { ///

The name, if any, of the SqlInjectionMatchSet.

#[serde(rename = "Name")] @@ -1887,7 +1921,7 @@ pub struct SqlInjectionMatchSet { ///

The Id and Name of a SqlInjectionMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SqlInjectionMatchSetSummary { ///

The name of the SqlInjectionMatchSet, if any, specified by Id.

#[serde(rename = "Name")] @@ -1921,7 +1955,7 @@ pub struct SqlInjectionMatchTuple { ///

A summary of the rule groups you are subscribed to.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SubscribedRuleGroupSummary { ///

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the RuleGroup.

#[serde(rename = "MetricName")] @@ -1934,6 +1968,39 @@ pub struct SubscribedRuleGroupSummary { pub rule_group_id: String, } +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Tag { + #[serde(rename = "Key")] + #[serde(skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(rename = "Value")] + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagInfoForResource { + #[serde(rename = "ResourceARN")] + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_arn: Option, + #[serde(rename = "TagList")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tag_list: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct TagResourceRequest { + #[serde(rename = "ResourceARN")] + pub resource_arn: String, + #[serde(rename = "Tags")] + pub tags: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct TagResourceResponse {} + ///

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TimeWindow { @@ -1945,6 +2012,18 @@ pub struct TimeWindow { pub start_time: f64, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct UntagResourceRequest { + #[serde(rename = "ResourceARN")] + pub resource_arn: String, + #[serde(rename = "TagKeys")] + pub tag_keys: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct UntagResourceResponse {} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateByteMatchSetRequest { ///
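// ---- Editor's sketch (not part of the diff) --------------------------------
// The tagging surface this diff introduces: TagResource, ListTagsForResource
// and UntagResource, all keyed by the rule or web ACL ARN. A blocking round
// trip, assuming the matching `Waf` trait methods added elsewhere in this diff
// and the `.sync()` pattern of Rusoto 0.4x (ARN and tag values illustrative):
fn sketch_tag_roundtrip(client: &WafClient, arn: String) -> Result<(), Box<dyn std::error::Error>> {
    client
        .tag_resource(TagResourceRequest {
            resource_arn: arn.clone(),
            tags: vec![Tag {
                key: Some("team".to_string()),
                value: Some("security".to_string()),
            }],
        })
        .sync()?;
    let listed = client
        .list_tags_for_resource(ListTagsForResourceRequest {
            limit: None,
            next_marker: None,
            resource_arn: arn.clone(),
        })
        .sync()?;
    println!("{:?}", listed.tag_info_for_resource);
    client
        .untag_resource(UntagResourceRequest {
            resource_arn: arn,
            tag_keys: vec!["team".to_string()],
        })
        .sync()?;
    Ok(())
}
// -----------------------------------------------------------------------------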

 #[derive(Default, Debug, Clone, PartialEq, Serialize)]
 pub struct UpdateByteMatchSetRequest {
     /// The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.
@@ -1959,7 +2038,7 @@ pub struct UpdateByteMatchSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateByteMatchSetResponse {
     /// The ChangeToken that you used to submit the UpdateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -1981,7 +2060,7 @@ pub struct UpdateGeoMatchSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateGeoMatchSetResponse {
     /// The ChangeToken that you used to submit the UpdateGeoMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2003,7 +2082,7 @@ pub struct UpdateIPSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateIPSetResponse {
     /// The ChangeToken that you used to submit the UpdateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2028,7 +2107,7 @@ pub struct UpdateRateBasedRuleRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateRateBasedRuleResponse {
     /// The ChangeToken that you used to submit the UpdateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2050,7 +2129,7 @@ pub struct UpdateRegexMatchSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateRegexMatchSetResponse {
     /// The ChangeToken that you used to submit the UpdateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2072,7 +2151,7 @@ pub struct UpdateRegexPatternSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateRegexPatternSetResponse {
     /// The ChangeToken that you used to submit the UpdateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2094,7 +2173,7 @@ pub struct UpdateRuleGroupRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateRuleGroupResponse {
     /// The ChangeToken that you used to submit the UpdateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2116,7 +2195,7 @@ pub struct UpdateRuleRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateRuleResponse {
     /// The ChangeToken that you used to submit the UpdateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.
     #[serde(rename = "ChangeToken")]
@@ -2138,7 +2217,7 @@ pub struct UpdateSizeConstraintSetRequest {
 }
 
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
-#[cfg_attr(test, derive(Serialize))]
+#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
 pub struct UpdateSizeConstraintSetResponse {
     /// The ChangeToken that you used to submit the UpdateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2162,7 +2241,7 @@ pub struct UpdateSqlInjectionMatchSetRequest { ///

The response to an UpdateSqlInjectionMatchSets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSqlInjectionMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2189,7 +2268,7 @@ pub struct UpdateWebACLRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateWebACLResponse { ///

The ChangeToken that you used to submit the UpdateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2213,7 +2292,7 @@ pub struct UpdateXssMatchSetRequest { ///

The response to an UpdateXssMatchSets request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateXssMatchSetResponse { ///

The ChangeToken that you used to submit the UpdateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

#[serde(rename = "ChangeToken")] @@ -2239,7 +2318,7 @@ pub struct WafOverrideAction { ///

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebACL { ///

The action to perform if none of the Rules contained in the WebACL match. The action is specified by the WafAction object.

#[serde(rename = "DefaultAction")] @@ -2266,7 +2345,7 @@ pub struct WebACL { ///

Contains the identifier and the name or description of the WebACL.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebACLSummary { ///

A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

#[serde(rename = "Name")] @@ -2289,7 +2368,7 @@ pub struct WebACLUpdate { ///

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If a XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct XssMatchSet { ///

The name, if any, of the XssMatchSet.

#[serde(rename = "Name")] @@ -2305,7 +2384,7 @@ pub struct XssMatchSet { ///

The Id and Name of an XssMatchSet.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct XssMatchSetSummary { ///

The name of the XssMatchSet, if any, specified by Id.

#[serde(rename = "Name")] @@ -2539,6 +2618,7 @@ impl Error for CreateIPSetError { /// Errors returned by CreateRateBasedRule #[derive(Debug, PartialEq)] pub enum CreateRateBasedRuleError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2549,12 +2629,19 @@ pub enum CreateRateBasedRuleError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

     WAFStaleData(String),
+
+    WAFTagOperation(String),
+
+    WAFTagOperationInternalError(String),
 }
 
 impl CreateRateBasedRuleError {
     pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateRateBasedRuleError> {
         if let Some(err) = proto::json::Error::parse(&res) {
             match err.typ.as_str() {
+                "WAFBadRequestException" => {
+                    return RusotoError::Service(CreateRateBasedRuleError::WAFBadRequest(err.msg))
+                }
                 "WAFDisallowedNameException" => {
                     return RusotoError::Service(CreateRateBasedRuleError::WAFDisallowedName(
                         err.msg,
@@ -2578,6 +2665,14 @@ impl CreateRateBasedRuleError {
                 "WAFStaleDataException" => {
                     return RusotoError::Service(CreateRateBasedRuleError::WAFStaleData(err.msg))
                 }
+                "WAFTagOperationException" => {
+                    return RusotoError::Service(CreateRateBasedRuleError::WAFTagOperation(err.msg))
+                }
+                "WAFTagOperationInternalErrorException" => {
+                    return RusotoError::Service(
+                        CreateRateBasedRuleError::WAFTagOperationInternalError(err.msg),
+                    )
+                }
                 "ValidationException" => return RusotoError::Validation(err.msg),
                 _ => {}
             }
@@ -2593,11 +2688,14 @@ impl fmt::Display for CreateRateBasedRuleError {
 impl Error for CreateRateBasedRuleError {
     fn description(&self) -> &str {
         match *self {
+            CreateRateBasedRuleError::WAFBadRequest(ref cause) => cause,
             CreateRateBasedRuleError::WAFDisallowedName(ref cause) => cause,
             CreateRateBasedRuleError::WAFInternalError(ref cause) => cause,
             CreateRateBasedRuleError::WAFInvalidParameter(ref cause) => cause,
             CreateRateBasedRuleError::WAFLimitsExceeded(ref cause) => cause,
             CreateRateBasedRuleError::WAFStaleData(ref cause) => cause,
+            CreateRateBasedRuleError::WAFTagOperation(ref cause) => cause,
+            CreateRateBasedRuleError::WAFTagOperationInternalError(ref cause) => cause,
         }
     }
 }
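
// ---- Editor's sketch (not part of the diff) --------------------------------
// How the new variants surface to callers: tagging failures now arrive as
// typed service errors rather than falling through to the catch-all case.
// Illustrative handling:
fn sketch_handle_create_error(err: RusotoError<CreateRateBasedRuleError>) {
    match err {
        RusotoError::Service(CreateRateBasedRuleError::WAFBadRequest(msg)) => {
            eprintln!("bad request: {}", msg);
        }
        RusotoError::Service(CreateRateBasedRuleError::WAFTagOperation(msg)) => {
            eprintln!("invalid tag operation: {}", msg);
        }
        RusotoError::Service(CreateRateBasedRuleError::WAFTagOperationInternalError(msg)) => {
            eprintln!("tagging failed inside AWS WAF; safe to retry: {}", msg);
        }
        other => eprintln!("other failure: {}", other),
    }
}
// -----------------------------------------------------------------------------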

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2728,12 +2827,19 @@ pub enum CreateRuleError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateRuleError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateRuleError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateRuleError::WAFDisallowedName(err.msg)) } @@ -2749,6 +2855,14 @@ impl CreateRuleError { "WAFStaleDataException" => { return RusotoError::Service(CreateRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(CreateRuleError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2764,17 +2878,21 @@ impl fmt::Display for CreateRuleError { impl Error for CreateRuleError { fn description(&self) -> &str { match *self { + CreateRuleError::WAFBadRequest(ref cause) => cause, CreateRuleError::WAFDisallowedName(ref cause) => cause, CreateRuleError::WAFInternalError(ref cause) => cause, CreateRuleError::WAFInvalidParameter(ref cause) => cause, CreateRuleError::WAFLimitsExceeded(ref cause) => cause, CreateRuleError::WAFStaleData(ref cause) => cause, + CreateRuleError::WAFTagOperation(ref cause) => cause, + CreateRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } /// Errors returned by CreateRuleGroup #[derive(Debug, PartialEq)] pub enum CreateRuleGroupError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2783,12 +2901,19 @@ pub enum CreateRuleGroupError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateRuleGroupError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateRuleGroupError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateRuleGroupError::WAFDisallowedName(err.msg)) } @@ -2801,6 +2926,14 @@ impl CreateRuleGroupError { "WAFStaleDataException" => { return RusotoError::Service(CreateRuleGroupError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateRuleGroupError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + CreateRuleGroupError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -2816,10 +2949,13 @@ impl fmt::Display for CreateRuleGroupError { impl Error for CreateRuleGroupError { fn description(&self) -> &str { match *self { + CreateRuleGroupError::WAFBadRequest(ref cause) => cause, CreateRuleGroupError::WAFDisallowedName(ref cause) => cause, CreateRuleGroupError::WAFInternalError(ref cause) => cause, CreateRuleGroupError::WAFLimitsExceeded(ref cause) => cause, CreateRuleGroupError::WAFStaleData(ref cause) => cause, + CreateRuleGroupError::WAFTagOperation(ref cause) => cause, + CreateRuleGroupError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -2978,6 +3114,7 @@ impl Error for CreateSqlInjectionMatchSetError { /// Errors returned by CreateWebACL #[derive(Debug, PartialEq)] pub enum CreateWebACLError { + WAFBadRequest(String), ///

The name specified is invalid.

WAFDisallowedName(String), ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

@@ -2990,12 +3127,19 @@ pub enum CreateWebACLError { WAFLimitsExceeded(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl CreateWebACLError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { if let Some(err) = proto::json::Error::parse(&res) { match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(CreateWebACLError::WAFBadRequest(err.msg)) + } "WAFDisallowedNameException" => { return RusotoError::Service(CreateWebACLError::WAFDisallowedName(err.msg)) } @@ -3014,6 +3158,14 @@ impl CreateWebACLError { "WAFStaleDataException" => { return RusotoError::Service(CreateWebACLError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(CreateWebACLError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(CreateWebACLError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3029,12 +3181,15 @@ impl fmt::Display for CreateWebACLError { impl Error for CreateWebACLError { fn description(&self) -> &str { match *self { + CreateWebACLError::WAFBadRequest(ref cause) => cause, CreateWebACLError::WAFDisallowedName(ref cause) => cause, CreateWebACLError::WAFInternalError(ref cause) => cause, CreateWebACLError::WAFInvalidAccount(ref cause) => cause, CreateWebACLError::WAFInvalidParameter(ref cause) => cause, CreateWebACLError::WAFLimitsExceeded(ref cause) => cause, CreateWebACLError::WAFStaleData(ref cause) => cause, + CreateWebACLError::WAFTagOperation(ref cause) => cause, + CreateWebACLError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3419,6 +3574,10 @@ pub enum DeleteRateBasedRuleError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRateBasedRuleError { @@ -3453,6 +3612,14 @@ impl DeleteRateBasedRuleError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRateBasedRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRateBasedRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + DeleteRateBasedRuleError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3474,6 +3641,8 @@ impl Error for DeleteRateBasedRuleError { DeleteRateBasedRuleError::WAFNonexistentItem(ref cause) => cause, DeleteRateBasedRuleError::WAFReferencedItem(ref cause) => cause, DeleteRateBasedRuleError::WAFStaleData(ref cause) => cause, + DeleteRateBasedRuleError::WAFTagOperation(ref cause) => cause, + DeleteRateBasedRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3638,6 +3807,10 @@ pub enum DeleteRuleError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRuleError { @@ -3662,6 +3835,14 @@ impl DeleteRuleError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRuleError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRuleError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(DeleteRuleError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3683,6 +3864,8 @@ impl Error for DeleteRuleError { DeleteRuleError::WAFNonexistentItem(ref cause) => cause, DeleteRuleError::WAFReferencedItem(ref cause) => cause, DeleteRuleError::WAFStaleData(ref cause) => cause, + DeleteRuleError::WAFTagOperation(ref cause) => cause, + DeleteRuleError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3701,6 +3884,10 @@ pub enum DeleteRuleGroupError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteRuleGroupError { @@ -3725,6 +3912,14 @@ impl DeleteRuleGroupError { "WAFStaleDataException" => { return RusotoError::Service(DeleteRuleGroupError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteRuleGroupError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + DeleteRuleGroupError::WAFTagOperationInternalError(err.msg), + ) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3746,6 +3941,8 @@ impl Error for DeleteRuleGroupError { DeleteRuleGroupError::WAFNonexistentItem(ref cause) => cause, DeleteRuleGroupError::WAFReferencedItem(ref cause) => cause, DeleteRuleGroupError::WAFStaleData(ref cause) => cause, + DeleteRuleGroupError::WAFTagOperation(ref cause) => cause, + DeleteRuleGroupError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -3916,6 +4113,10 @@ pub enum DeleteWebACLError { WAFReferencedItem(String), ///

The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

WAFStaleData(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), } impl DeleteWebACLError { @@ -3940,6 +4141,14 @@ impl DeleteWebACLError { "WAFStaleDataException" => { return RusotoError::Service(DeleteWebACLError::WAFStaleData(err.msg)) } + "WAFTagOperationException" => { + return RusotoError::Service(DeleteWebACLError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(DeleteWebACLError::WAFTagOperationInternalError( + err.msg, + )) + } "ValidationException" => return RusotoError::Validation(err.msg), _ => {} } @@ -3961,6 +4170,8 @@ impl Error for DeleteWebACLError { DeleteWebACLError::WAFNonexistentItem(ref cause) => cause, DeleteWebACLError::WAFReferencedItem(ref cause) => cause, DeleteWebACLError::WAFStaleData(ref cause) => cause, + DeleteWebACLError::WAFTagOperation(ref cause) => cause, + DeleteWebACLError::WAFTagOperationInternalError(ref cause) => cause, } } } @@ -5390,6 +5601,76 @@ impl Error for ListSubscribedRuleGroupsError { } } } +/// Errors returned by ListTagsForResource +#[derive(Debug, PartialEq)] +pub enum ListTagsForResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.
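On the caller side, the variants added here let tagging failures be handled separately from everything else. A minimal sketch, assuming the generated field names follow rusoto's usual mapping (`resource_arn`, `tag_info_for_resource`):

```rust
use rusoto_core::RusotoError;
use rusoto_waf::{ListTagsForResourceError, ListTagsForResourceRequest, Waf, WafClient};

// Sketch: list the tags on a WAF resource and single out tagging errors.
fn print_tags(client: &WafClient, arn: String) {
    let req = ListTagsForResourceRequest {
        resource_arn: arn,
        ..Default::default()
    };
    match client.list_tags_for_resource(req).sync() {
        Ok(resp) => println!("{:?}", resp.tag_info_for_resource),
        // The tagging subsystem reports through its own variants:
        Err(RusotoError::Service(ListTagsForResourceError::WAFTagOperation(msg)))
        | Err(RusotoError::Service(
            ListTagsForResourceError::WAFTagOperationInternalError(msg),
        )) => eprintln!("tagging error: {}", msg),
        Err(e) => eprintln!("ListTagsForResource failed: {:?}", e),
    }
}
```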

+ WAFInvalidParameter(String), + ///

The operation failed because the referenced object doesn't exist.

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl ListTagsForResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(ListTagsForResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(ListTagsForResourceError::WAFInternalError( + err.msg, + )) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(ListTagsForResourceError::WAFInvalidParameter( + err.msg, + )) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(ListTagsForResourceError::WAFNonexistentItem( + err.msg, + )) + } + "WAFTagOperationException" => { + return RusotoError::Service(ListTagsForResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service( + ListTagsForResourceError::WAFTagOperationInternalError(err.msg), + ) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for ListTagsForResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for ListTagsForResourceError { + fn description(&self) -> &str { + match *self { + ListTagsForResourceError::WAFBadRequest(ref cause) => cause, + ListTagsForResourceError::WAFInternalError(ref cause) => cause, + ListTagsForResourceError::WAFInvalidParameter(ref cause) => cause, + ListTagsForResourceError::WAFNonexistentItem(ref cause) => cause, + ListTagsForResourceError::WAFTagOperation(ref cause) => cause, + ListTagsForResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} /// Errors returned by ListWebACLs #[derive(Debug, PartialEq)] pub enum ListWebACLsError { @@ -5584,6 +5865,140 @@ impl Error for PutPermissionPolicyError { } } } +/// Errors returned by TagResource +#[derive(Debug, PartialEq)] +pub enum TagResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.

+ WAFInvalidParameter(String), + ///

The operation exceeds a resource limit, for example, the maximum number of WebACL objects that you can create for an AWS account. For more information, see Limits in the AWS WAF Developer Guide.

+ WAFLimitsExceeded(String), + ///

The operation failed because the referenced object doesn't exist.
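For symmetry, a hedged sketch of calling the new TagResource operation. It assumes `Tag`'s `Key`/`Value` are generated as `Option` fields from the 2015-08-24 model; if the generated struct marks them required, drop the `Some(...)` wrappers:

```rust
use rusoto_waf::{Tag, TagResourceRequest, Waf, WafClient};

// Sketch: attach an env=prod tag to a WAF resource.
fn tag_as_prod(client: &WafClient, arn: String) {
    let req = TagResourceRequest {
        resource_arn: arn,
        tags: vec![Tag {
            // Assumption: Option fields, per the service model at the time.
            key: Some("env".to_owned()),
            value: Some("prod".to_owned()),
        }],
    };
    client.tag_resource(req).sync().expect("TagResource failed");
}
```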

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl TagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(TagResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(TagResourceError::WAFInternalError(err.msg)) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(TagResourceError::WAFInvalidParameter(err.msg)) + } + "WAFLimitsExceededException" => { + return RusotoError::Service(TagResourceError::WAFLimitsExceeded(err.msg)) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(TagResourceError::WAFNonexistentItem(err.msg)) + } + "WAFTagOperationException" => { + return RusotoError::Service(TagResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(TagResourceError::WAFTagOperationInternalError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for TagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for TagResourceError { + fn description(&self) -> &str { + match *self { + TagResourceError::WAFBadRequest(ref cause) => cause, + TagResourceError::WAFInternalError(ref cause) => cause, + TagResourceError::WAFInvalidParameter(ref cause) => cause, + TagResourceError::WAFLimitsExceeded(ref cause) => cause, + TagResourceError::WAFNonexistentItem(ref cause) => cause, + TagResourceError::WAFTagOperation(ref cause) => cause, + TagResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} +/// Errors returned by UntagResource +#[derive(Debug, PartialEq)] +pub enum UntagResourceError { + WAFBadRequest(String), + ///

The operation failed because of a system problem, even though the request was valid. Retry your request.

+ WAFInternalError(String), + ///

The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

  • You specified an invalid parameter name.

  • You specified an invalid value.

  • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.

  • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to create a RateBasedRule with a RateKey value other than IP.

  • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.

  • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, METHOD, QUERY_STRING, URI, or BODY.

  • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.

+ WAFInvalidParameter(String), + ///

The operation failed because the referenced object doesn't exist.
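And the inverse: UntagResource takes the resource ARN plus a list of tag keys. A sketch, again assuming the usual snake_case field mapping (`resource_arn`, `tag_keys`):

```rust
use rusoto_waf::{UntagResourceRequest, Waf, WafClient};

// Sketch: remove the "env" tag from a WAF resource by key.
fn remove_env_tag(client: &WafClient, arn: String) {
    let req = UntagResourceRequest {
        resource_arn: arn,
        tag_keys: vec!["env".to_owned()],
    };
    client.untag_resource(req).sync().expect("UntagResource failed");
}
```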

+ WAFNonexistentItem(String), + + WAFTagOperation(String), + + WAFTagOperationInternalError(String), +} + +impl UntagResourceError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "WAFBadRequestException" => { + return RusotoError::Service(UntagResourceError::WAFBadRequest(err.msg)) + } + "WAFInternalErrorException" => { + return RusotoError::Service(UntagResourceError::WAFInternalError(err.msg)) + } + "WAFInvalidParameterException" => { + return RusotoError::Service(UntagResourceError::WAFInvalidParameter(err.msg)) + } + "WAFNonexistentItemException" => { + return RusotoError::Service(UntagResourceError::WAFNonexistentItem(err.msg)) + } + "WAFTagOperationException" => { + return RusotoError::Service(UntagResourceError::WAFTagOperation(err.msg)) + } + "WAFTagOperationInternalErrorException" => { + return RusotoError::Service(UntagResourceError::WAFTagOperationInternalError( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for UntagResourceError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for UntagResourceError { + fn description(&self) -> &str { + match *self { + UntagResourceError::WAFBadRequest(ref cause) => cause, + UntagResourceError::WAFInternalError(ref cause) => cause, + UntagResourceError::WAFInvalidParameter(ref cause) => cause, + UntagResourceError::WAFNonexistentItem(ref cause) => cause, + UntagResourceError::WAFTagOperation(ref cause) => cause, + UntagResourceError::WAFTagOperationInternalError(ref cause) => cause, + } + } +} /// Errors returned by UpdateByteMatchSet #[derive(Debug, PartialEq)] pub enum UpdateByteMatchSetError { @@ -6969,6 +7384,11 @@ pub trait Waf { input: ListSubscribedRuleGroupsRequest, ) -> RusotoFuture; + fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture; + ///

Returns an array of WebACLSummary objects in the response.
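The listing above is paged; a sketch of draining every page through the `NextMarker` cursor (note rusoto's snake_casing of `WebACLs` to `web_ac_ls`):

```rust
use rusoto_waf::{ListWebACLsRequest, Waf, WafClient};

// Sketch: collect the names of all web ACLs across pages.
fn all_web_acl_names(client: &WafClient) -> Vec<String> {
    let mut names = Vec::new();
    let mut marker: Option<String> = None;
    loop {
        let req = ListWebACLsRequest {
            limit: Some(100),
            next_marker: marker.clone(),
        };
        let resp = client.list_web_ac_ls(req).sync().expect("ListWebACLs failed");
        for acl in resp.web_ac_ls.unwrap_or_default() {
            names.push(acl.name);
        }
        match resp.next_marker {
            Some(m) => marker = Some(m),
            None => break, // no more pages
        }
    }
    names
}
```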

fn list_web_ac_ls( &self, @@ -6993,6 +7413,16 @@ pub trait Waf { input: ListWebACLsRequest, ) -> RusotoFuture<ListWebACLsResponse, ListWebACLsError>; + fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture<TagResourceResponse, TagResourceError>; + + fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture<UntagResourceResponse, UntagResourceError>; + ///

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.
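A compact sketch of steps 2–3 above; the change token comes from a prior `GetChangeToken` call, and a real request needs at least one `ByteMatchSetUpdate` in `updates` (its construction is elided here):

```rust
use rusoto_waf::{UpdateByteMatchSetRequest, Waf, WafClient};

// Sketch: submit an UpdateByteMatchSet with a previously fetched token.
fn update_byte_match_set(client: &WafClient, set_id: String, change_token: String) {
    let req = UpdateByteMatchSetRequest {
        byte_match_set_id: set_id,
        change_token,
        // e.g. an INSERT of a ByteMatchTuple matching "BadBot" in the
        // User-Agent header would go here.
        updates: vec![],
    };
    client
        .update_byte_match_set(req)
        .sync()
        .expect("UpdateByteMatchSet failed");
}
```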

fn update_byte_match_set( &self, @@ -7077,10 +7507,7 @@ impl WafClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WafClient { - WafClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -7094,10 +7521,14 @@ impl WafClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WafClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WafClient { + WafClient { client, region } } } @@ -8703,6 +9134,33 @@ impl Waf for WafClient { }) } + fn list_tags_for_resource( + &self, + input: ListTagsForResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSWAF_20150824.ListTagsForResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response.buffer().from_err().and_then(|response| { + Err(ListTagsForResourceError::from_response(response)) + }), + ) + } + }) + } + ///

Returns an array of WebACLSummary objects in the response.

fn list_web_ac_ls( &self, @@ -8815,6 +9273,62 @@ impl Waf for WafClient { }) } + fn tag_resource( + &self, + input: TagResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSWAF_20150824.TagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(TagResourceError::from_response(response))), + ) + } + }) + } + + fn untag_resource( + &self, + input: UntagResourceRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "waf", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "AWSWAF_20150824.UntagResource"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(UntagResourceError::from_response(response))), + ) + } + }) + } + ///

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.
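Stepping back from UpdateByteMatchSet: the constructor refactor earlier in this file routes both `new` and `new_with` through the added `new_with_client`, which makes it straightforward to reuse one `rusoto_core::Client` across service clients. A sketch (assumes `Client` implements `Clone`, as it wraps shared state):

```rust
use rusoto_core::{Client, Region};
use rusoto_waf::WafClient;

// Sketch: two construction paths that now share the same code.
fn make_clients(region: Region) -> (WafClient, WafClient) {
    let shared = Client::shared();
    let explicit = WafClient::new_with_client(shared.clone(), region.clone());
    let default = WafClient::new(region); // delegates to new_with_client
    (explicit, default)
}
```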

fn update_byte_match_set( &self, diff --git a/rusoto/services/workdocs/Cargo.toml b/rusoto/services/workdocs/Cargo.toml index ba773f52807..02f498fcd98 100644 --- a/rusoto/services/workdocs/Cargo.toml +++ b/rusoto/services/workdocs/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_workdocs" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/workdocs/README.md b/rusoto/services/workdocs/README.md index 1a3f5233424..eab28f9a83b 100644 --- a/rusoto/services/workdocs/README.md +++ b/rusoto/services/workdocs/README.md @@ -23,9 +23,16 @@ To use `rusoto_workdocs` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_workdocs = "0.40.0" +rusoto_workdocs = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/workdocs/src/custom/mod.rs b/rusoto/services/workdocs/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/workdocs/src/custom/mod.rs +++ b/rusoto/services/workdocs/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/workdocs/src/generated.rs b/rusoto/services/workdocs/src/generated.rs index 417a0a12a82..a194d71dd70 100644 --- a/rusoto/services/workdocs/src/generated.rs +++ b/rusoto/services/workdocs/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto; @@ -51,7 +50,7 @@ pub struct ActivateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ActivateUserResponse { ///

The user information.
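Since the crate now exposes the `serialize_structs` feature (see the Cargo.toml and README changes above), output structs such as `ActivateUserResponse` derive `Serialize` outside of tests as well. A sketch, assuming the caller has `serde_json` available:

```rust
// Requires: rusoto_workdocs = { version = "0.41", features = ["serialize_structs"] }
use rusoto_workdocs::ActivateUserResponse;

// Sketch: dump a response struct as JSON, e.g. for logging or snapshots.
fn to_json(resp: &ActivateUserResponse) -> String {
    serde_json::to_string_pretty(resp).expect("Serialize is derived under serialize_structs")
}
```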

#[serde(rename = "User")] @@ -61,7 +60,7 @@ pub struct ActivateUserResponse { ///

Describes the activity information.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Activity { ///

Metadata of the commenting activity. This is an optional field and is filled for commenting activities.

#[serde(rename = "CommentMetadata")] @@ -120,7 +119,7 @@ pub struct AddResourcePermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AddResourcePermissionsResponse { ///

The share results.

#[serde(rename = "ShareResults")] @@ -130,7 +129,7 @@ pub struct AddResourcePermissionsResponse { ///

Describes a comment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Comment { ///

The ID of the comment.

#[serde(rename = "CommentId")] @@ -171,7 +170,7 @@ pub struct Comment { ///

Describes the metadata of a comment.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CommentMetadata { ///

The ID of the comment.

#[serde(rename = "CommentId")] @@ -229,7 +228,7 @@ pub struct CreateCommentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCommentResponse { ///

The comment that has been created.

#[serde(rename = "Comment")] @@ -256,7 +255,7 @@ pub struct CreateCustomMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateCustomMetadataResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -275,7 +274,7 @@ pub struct CreateFolderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFolderResponse { ///

The metadata of the folder.

#[serde(rename = "Metadata")] @@ -298,7 +297,7 @@ pub struct CreateLabelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateLabelsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -318,7 +317,7 @@ pub struct CreateNotificationSubscriptionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateNotificationSubscriptionResponse { ///

The subscription.

#[serde(rename = "Subscription")] @@ -363,7 +362,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResponse { ///

The user information.

#[serde(rename = "User")] @@ -423,7 +422,7 @@ pub struct DeleteCustomMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteCustomMetadataResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -479,7 +478,7 @@ pub struct DeleteLabelsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteLabelsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -548,7 +547,7 @@ pub struct DescribeActivitiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeActivitiesResponse { ///

The marker for the next set of results.

#[serde(rename = "Marker")] @@ -583,7 +582,7 @@ pub struct DescribeCommentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCommentsResponse { ///

The list of comments for the specified document version.

#[serde(rename = "Comments")] @@ -623,7 +622,7 @@ pub struct DescribeDocumentVersionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDocumentVersionsResponse { ///

The document versions.

#[serde(rename = "DocumentVersions")] @@ -671,7 +670,7 @@ pub struct DescribeFolderContentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFolderContentsResponse { ///

The documents in the specified folder.

#[serde(rename = "Documents")] @@ -711,7 +710,7 @@ pub struct DescribeGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeGroupsResponse { ///

The list of groups.

#[serde(rename = "Groups")] @@ -739,7 +738,7 @@ pub struct DescribeNotificationSubscriptionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeNotificationSubscriptionsResponse { ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

#[serde(rename = "Marker")] @@ -775,7 +774,7 @@ pub struct DescribeResourcePermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeResourcePermissionsResponse { ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

#[serde(rename = "Marker")] @@ -803,7 +802,7 @@ pub struct DescribeRootFoldersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeRootFoldersResponse { ///

The user's special folders.

#[serde(rename = "Folders")] @@ -860,7 +859,7 @@ pub struct DescribeUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUsersResponse { ///

The marker to use when requesting the next set of results. If there are no additional results, the string is empty.
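The marker fields above drive pagination. A sketch of walking DescribeUsers pages, treating an absent or empty response marker as the end:

```rust
use rusoto_workdocs::{DescribeUsersRequest, Workdocs, WorkdocsClient};

// Sketch: count all users in an organization across pages.
fn count_users(client: &WorkdocsClient, org_id: String) -> usize {
    let mut total = 0;
    let mut marker: Option<String> = None;
    loop {
        let resp = client
            .describe_users(DescribeUsersRequest {
                organization_id: Some(org_id.clone()),
                marker: marker.clone(),
                ..Default::default()
            })
            .sync()
            .expect("DescribeUsers failed");
        total += resp.users.map_or(0, |users| users.len());
        match resp.marker {
            Some(m) if !m.is_empty() => marker = Some(m),
            _ => break,
        }
    }
    total
}
```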

#[serde(rename = "Marker")] @@ -874,7 +873,7 @@ pub struct DescribeUsersResponse { ///

Describes the document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentMetadata { ///

The time when the document was created.

#[serde(rename = "CreatedTimestamp")] @@ -912,7 +911,7 @@ pub struct DocumentMetadata { ///

Describes a version of a document.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DocumentVersionMetadata { ///

The timestamp when the content of the document was originally created.

#[serde(rename = "ContentCreatedTimestamp")] @@ -970,7 +969,7 @@ pub struct DocumentVersionMetadata { ///

Describes a folder.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FolderMetadata { ///

The time when the folder was created.

#[serde(rename = "CreatedTimestamp")] @@ -1026,7 +1025,7 @@ pub struct GetCurrentUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetCurrentUserResponse { ///

Metadata of the user.

#[serde(rename = "User")] @@ -1058,7 +1057,7 @@ pub struct GetDocumentPathRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDocumentPathResponse { ///

The path information.

#[serde(rename = "Path")] @@ -1082,7 +1081,7 @@ pub struct GetDocumentRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDocumentResponse { ///

The custom metadata on the document.

#[serde(rename = "CustomMetadata")] @@ -1117,7 +1116,7 @@ pub struct GetDocumentVersionRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetDocumentVersionResponse { ///

The custom metadata on the document version.

#[serde(rename = "CustomMetadata")] @@ -1153,7 +1152,7 @@ pub struct GetFolderPathRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFolderPathResponse { ///

The path information.

#[serde(rename = "Path")] @@ -1177,7 +1176,7 @@ pub struct GetFolderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetFolderResponse { ///

The custom metadata on the folder.

#[serde(rename = "CustomMetadata")] @@ -1214,7 +1213,7 @@ pub struct GetResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetResourcesResponse { ///

The documents in the specified collection.

#[serde(rename = "Documents")] @@ -1232,7 +1231,7 @@ pub struct GetResourcesResponse { ///

Describes the metadata of a user group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupMetadata { ///

The ID of the user group.

#[serde(rename = "Id")] @@ -1280,7 +1279,7 @@ pub struct InitiateDocumentVersionUploadRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InitiateDocumentVersionUploadResponse { ///

The document metadata.

#[serde(rename = "Metadata")] @@ -1307,7 +1306,7 @@ pub struct NotificationOptions { ///

Describes the users or user groups.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Participants { ///

The list of user groups.

#[serde(rename = "Groups")] @@ -1321,7 +1320,7 @@ pub struct Participants { ///

Describes the permissions.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PermissionInfo { ///

The role of the user.

#[serde(rename = "Role")] @@ -1335,7 +1334,7 @@ pub struct PermissionInfo { ///

Describes a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Principal { ///

The ID of the resource.

#[serde(rename = "Id")] @@ -1382,7 +1381,7 @@ pub struct RemoveResourcePermissionRequest { ///

Describes the metadata of a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceMetadata { ///

The ID of the resource.

#[serde(rename = "Id")] @@ -1416,7 +1415,7 @@ pub struct ResourceMetadata { ///

Describes the path information of a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourcePath { ///

The components of the resource path.

#[serde(rename = "Components")] @@ -1426,7 +1425,7 @@ pub struct ResourcePath { ///

Describes the resource path.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourcePathComponent { ///

The ID of the resource path.

#[serde(rename = "Id")] @@ -1454,7 +1453,7 @@ pub struct SharePrincipal { ///

Describes the share results of a resource.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ShareResult { ///

The ID of the invited user.

#[serde(rename = "InviteePrincipalId")] @@ -1497,7 +1496,7 @@ pub struct StorageRuleType { ///

Describes a subscription.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Subscription { ///

The endpoint of the subscription.

#[serde(rename = "EndPoint")] @@ -1617,7 +1616,7 @@ pub struct UpdateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateUserResponse { ///

The user information.

#[serde(rename = "User")] @@ -1627,7 +1626,7 @@ pub struct UpdateUserResponse { ///

Describes the upload.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UploadMetadata { ///

The signed headers.

#[serde(rename = "SignedHeaders")] @@ -1641,7 +1640,7 @@ pub struct UploadMetadata { ///

Describes a user.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct User { ///

The time when the user was created.

#[serde(rename = "CreatedTimestamp")] @@ -1707,7 +1706,7 @@ pub struct User { ///

Describes the metadata of the user.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserMetadata { ///

The email address of the user.

#[serde(rename = "EmailAddress")] @@ -1733,7 +1732,7 @@ pub struct UserMetadata { ///

Describes the storage for a user.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserStorageMetadata { ///

The storage for a user.

#[serde(rename = "StorageRule")] @@ -4885,10 +4884,7 @@ impl WorkdocsClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WorkdocsClient { - WorkdocsClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -4902,10 +4898,14 @@ impl WorkdocsClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WorkdocsClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WorkdocsClient { + WorkdocsClient { client, region } } } diff --git a/rusoto/services/worklink/Cargo.toml b/rusoto/services/worklink/Cargo.toml index 6b5681a5096..fe5e0a7e0d0 100644 --- a/rusoto/services/worklink/Cargo.toml +++ b/rusoto/services/worklink/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_worklink" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/worklink/README.md b/rusoto/services/worklink/README.md index 2135c0ef2b6..b9d1edde119 100644 --- a/rusoto/services/worklink/README.md +++ b/rusoto/services/worklink/README.md @@ -23,9 +23,16 @@ To use `rusoto_worklink` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_worklink = "0.40.0" +rusoto_worklink = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/worklink/src/custom/mod.rs b/rusoto/services/worklink/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/worklink/src/custom/mod.rs +++ b/rusoto/services/worklink/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/worklink/src/generated.rs b/rusoto/services/worklink/src/generated.rs index d8ecef28fa6..5beef9e79aa 100644 --- a/rusoto/services/worklink/src/generated.rs +++ b/rusoto/services/worklink/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -42,7 +41,7 @@ pub struct AssociateDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDomainResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -60,7 +59,7 @@ pub struct AssociateWebsiteAuthorizationProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateWebsiteAuthorizationProviderResponse { ///

A unique identifier for the authorization provider.

#[serde(rename = "AuthorizationProviderId")] @@ -83,7 +82,7 @@ pub struct AssociateWebsiteCertificateAuthorityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateWebsiteCertificateAuthorityResponse { ///

A unique identifier for the CA.

#[serde(rename = "WebsiteCaId")] @@ -107,7 +106,7 @@ pub struct CreateFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateFleetResponse { ///

The ARN of the fleet.

#[serde(rename = "FleetArn")] @@ -123,7 +122,7 @@ pub struct DeleteFleetRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteFleetResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -134,7 +133,7 @@ pub struct DescribeAuditStreamConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAuditStreamConfigurationResponse { ///

The ARN of the Amazon Kinesis data stream that will receive the audit events.

#[serde(rename = "AuditStreamArn")] @@ -150,7 +149,7 @@ pub struct DescribeCompanyNetworkConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeCompanyNetworkConfigurationResponse { ///

The security groups associated with access to the provided subnets.

#[serde(rename = "SecurityGroupIds")] @@ -174,7 +173,7 @@ pub struct DescribeDevicePolicyConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDevicePolicyConfigurationResponse { ///

The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.

#[serde(rename = "DeviceCaCertificate")] @@ -193,7 +192,7 @@ pub struct DescribeDeviceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDeviceResponse { ///

The date that the device first signed in to Amazon WorkLink.

#[serde(rename = "FirstAccessedTime")] @@ -244,7 +243,7 @@ pub struct DescribeDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeDomainResponse { ///

The ARN of an issued ACM certificate that is valid for the domain being associated.

#[serde(rename = "AcmCertificateArn")] @@ -276,7 +275,7 @@ pub struct DescribeFleetMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeFleetMetadataResponse { ///

The identifier used by users to sign in to the Amazon WorkLink app.

#[serde(rename = "CompanyCode")] @@ -316,7 +315,7 @@ pub struct DescribeIdentityProviderConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIdentityProviderConfigurationResponse { ///

The SAML metadata document provided by the user’s identity provider.

#[serde(rename = "IdentityProviderSamlMetadata")] @@ -343,7 +342,7 @@ pub struct DescribeWebsiteCertificateAuthorityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWebsiteCertificateAuthorityResponse { ///

The root certificate of the certificate authority.

#[serde(rename = "Certificate")] @@ -361,7 +360,7 @@ pub struct DescribeWebsiteCertificateAuthorityResponse { ///

The summary of devices.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeviceSummary { ///

The ID of the device.

#[serde(rename = "DeviceId")] @@ -384,7 +383,7 @@ pub struct DisassociateDomainRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDomainResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -398,7 +397,7 @@ pub struct DisassociateWebsiteAuthorizationProviderRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateWebsiteAuthorizationProviderResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -412,12 +411,12 @@ pub struct DisassociateWebsiteCertificateAuthorityRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateWebsiteCertificateAuthorityResponse {} ///
The summary of the domain.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DomainSummary { ///
The time that the domain was created.
#[serde(rename = "CreatedTime")] @@ -436,7 +435,7 @@ pub struct DomainSummary { ///
The summary of the fleet.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FleetSummary { ///
The identifier used by users to sign into the Amazon WorkLink app.
#[serde(rename = "CompanyCode")] @@ -484,7 +483,7 @@ pub struct ListDevicesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDevicesResponse { ///
Information about the devices.
#[serde(rename = "Devices")] @@ -512,7 +511,7 @@ pub struct ListDomainsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListDomainsResponse { ///
Information about the domains.
#[serde(rename = "Domains")] @@ -537,7 +536,7 @@ pub struct ListFleetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListFleetsResponse { ///
The summary list of the fleets.
#[serde(rename = "FleetSummaryList")] @@ -565,7 +564,7 @@ pub struct ListWebsiteAuthorizationProvidersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWebsiteAuthorizationProvidersResponse { ///
The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.
#[serde(rename = "NextToken")] @@ -593,7 +592,7 @@ pub struct ListWebsiteCertificateAuthoritiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListWebsiteCertificateAuthoritiesResponse { ///
The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.
#[serde(rename = "NextToken")] @@ -616,7 +615,7 @@ pub struct RestoreDomainAccessRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RestoreDomainAccessResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -630,7 +629,7 @@ pub struct RevokeDomainAccessRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RevokeDomainAccessResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -644,7 +643,7 @@ pub struct SignOutUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SignOutUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -659,7 +658,7 @@ pub struct UpdateAuditStreamConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateAuditStreamConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -679,7 +678,7 @@ pub struct UpdateCompanyNetworkConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateCompanyNetworkConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -694,7 +693,7 @@ pub struct UpdateDevicePolicyConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDevicePolicyConfigurationResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -712,7 +711,7 @@ pub struct UpdateDomainMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateDomainMetadataResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -731,7 +730,7 @@ pub struct UpdateFleetMetadataRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateFleetMetadataResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -749,12 +748,12 @@ pub struct UpdateIdentityProviderConfigurationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateIdentityProviderConfigurationResponse {} ///
The summary of the website authorization provider.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebsiteAuthorizationProviderSummary { ///
A unique identifier for the authorization provider.
#[serde(rename = "AuthorizationProviderId")] @@ -775,7 +774,7 @@ pub struct WebsiteAuthorizationProviderSummary { ///
The summary of the certificate authority (CA).
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WebsiteCaSummary { ///
The time when the CA was added.
#[serde(rename = "CreatedTime")] @@ -2959,10 +2958,7 @@ impl WorklinkClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WorklinkClient { - WorklinkClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2976,10 +2972,14 @@ impl WorklinkClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WorklinkClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WorklinkClient { + WorklinkClient { client, region } } } diff --git a/rusoto/services/workmail/Cargo.toml b/rusoto/services/workmail/Cargo.toml index 35d5f66e2c0..7e49a867767 100644 --- a/rusoto/services/workmail/Cargo.toml +++ b/rusoto/services/workmail/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_workmail" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/workmail/README.md b/rusoto/services/workmail/README.md index 89e7e0c8b39..3805c2f18c9 100644 --- a/rusoto/services/workmail/README.md +++ b/rusoto/services/workmail/README.md @@ -23,9 +23,16 @@ To use `rusoto_workmail` in your application, add it as a dependency in your `Ca ```toml [dependencies] -rusoto_workmail = "0.40.0" +rusoto_workmail = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/workmail/src/custom/mod.rs b/rusoto/services/workmail/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/workmail/src/custom/mod.rs +++ b/rusoto/services/workmail/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/workmail/src/generated.rs b/rusoto/services/workmail/src/generated.rs index b596996e12e..0f1da2ddcb6 100644 --- a/rusoto/services/workmail/src/generated.rs +++ b/rusoto/services/workmail/src/generated.rs @@ -9,17 +9,16 @@ // must be updated to generate the changes. 
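The two recurring changes in these generated diffs are the new `serialize_structs` cargo feature (response structs derive `Serialize` outside of test builds) and the `new_with_client` constructor that `new` and `new_with` now delegate to. A minimal sketch of both, assuming a hypothetical downstream crate that enables the feature (`rusoto_worklink = { version = "0.41.0", features = ["serialize_structs"] }`) and declares its own `serde_json` dependency; the field value is a placeholder:

```rust
use rusoto_core::{Client, Region};
use rusoto_worklink::{DescribeFleetMetadataResponse, WorklinkClient};

fn main() {
    // `new_with_client` (which `new` and `new_with` now route through) lets
    // several service clients share one connection/credentials stack.
    let worklink = WorklinkClient::new_with_client(Client::shared(), Region::UsEast1);
    let _ = worklink;

    // With `serialize_structs` enabled, response structs derive Serialize
    // outside of #[cfg(test)] builds, so they can be re-encoded as JSON.
    let response = DescribeFleetMetadataResponse {
        company_code: Some("example-code".to_string()), // hypothetical value
        ..Default::default()
    };
    println!("{}", serde_json::to_string(&response).unwrap());
}
```

Funneling both constructors through `new_with_client` also removes the duplicated struct-literal construction that the old generated code repeated in `new` and `new_with`.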
// // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; @@ -38,7 +37,7 @@ pub struct AssociateDelegateToResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateDelegateToResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -55,7 +54,7 @@ pub struct AssociateMemberToGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateMemberToGroupResponse {} ///
At least one delegate must be associated to the resource to disable automatic replies from the resource.
@@ -89,7 +88,7 @@ pub struct CreateAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateAliasResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -103,7 +102,7 @@ pub struct CreateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGroupResponse { ///
The identifier of the group.
#[serde(rename = "GroupId")] @@ -125,7 +124,7 @@ pub struct CreateResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateResourceResponse { ///
The identifier of the new resource.
#[serde(rename = "ResourceId")] @@ -150,7 +149,7 @@ pub struct CreateUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateUserResponse { ///
The identifier for the new user.
#[serde(rename = "UserId")] @@ -160,7 +159,7 @@ pub struct CreateUserResponse { ///
The name of the attribute, which is one of the values defined in the UserAttribute enumeration.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Delegate { ///
The identifier for the user or group associated as the resource's delegate.
#[serde(rename = "Id")] @@ -184,7 +183,7 @@ pub struct DeleteAliasRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteAliasResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -198,7 +197,7 @@ pub struct DeleteGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -215,7 +214,7 @@ pub struct DeleteMailboxPermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteMailboxPermissionsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -229,7 +228,7 @@ pub struct DeleteResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -243,7 +242,7 @@ pub struct DeleteUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteUserResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -257,7 +256,7 @@ pub struct DeregisterFromWorkMailRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeregisterFromWorkMailResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -271,7 +270,7 @@ pub struct DescribeGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeGroupResponse { ///
The date and time when a user was deregistered from WorkMail, in UNIX epoch time format.
#[serde(rename = "DisabledDate")] @@ -307,7 +306,7 @@ pub struct DescribeOrganizationRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeOrganizationResponse { ///
The alias for an organization.
#[serde(rename = "Alias")] @@ -354,7 +353,7 @@ pub struct DescribeResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeResourceResponse { ///
The booking options for the described resource.
#[serde(rename = "BookingOptions")] @@ -401,7 +400,7 @@ pub struct DescribeUserRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeUserResponse { ///
The date and time at which the user was disabled for Amazon WorkMail usage, in UNIX epoch time format.
#[serde(rename = "DisabledDate")] @@ -451,7 +450,7 @@ pub struct DisassociateDelegateFromResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateDelegateFromResourceResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -468,7 +467,7 @@ pub struct DisassociateMemberFromGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateMemberFromGroupResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -482,7 +481,7 @@ pub struct GetMailboxDetailsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetMailboxDetailsResponse { ///
The maximum allowed mailbox size, in MB, for the specified user.
#[serde(rename = "MailboxQuota")] @@ -496,7 +495,7 @@ pub struct GetMailboxDetailsResponse { ///
The representation of an Amazon WorkMail group.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Group { ///
The date indicating when the group was disabled from Amazon WorkMail use.
#[serde(rename = "DisabledDate")] @@ -543,7 +542,7 @@ pub struct ListAliasesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAliasesResponse { ///
The entity's paginated aliases.
#[serde(rename = "Aliases")] @@ -574,7 +573,7 @@ pub struct ListGroupMembersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGroupMembersResponse { ///
The members associated to the group.
#[serde(rename = "Members")] @@ -602,7 +601,7 @@ pub struct ListGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListGroupsResponse { ///
The overview of groups for an organization.
#[serde(rename = "Groups")] @@ -633,7 +632,7 @@ pub struct ListMailboxPermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListMailboxPermissionsResponse { ///
The token to use to retrieve the next page of results. The value is "null" when there are no more results to return.
#[serde(rename = "NextToken")] @@ -658,7 +657,7 @@ pub struct ListOrganizationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListOrganizationsResponse { ///
The token to use to retrieve the next page of results. The value is "null" when there are no more results to return.
#[serde(rename = "NextToken")] @@ -689,7 +688,7 @@ pub struct ListResourceDelegatesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourceDelegatesResponse { ///
One page of the resource's delegates.
#[serde(rename = "Delegates")] @@ -717,7 +716,7 @@ pub struct ListResourcesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListResourcesResponse { ///
The token used to paginate through all the organization's resources. While results are still available, it has an associated value. When the last page is reached, the token is empty.
#[serde(rename = "NextToken")] @@ -745,7 +744,7 @@ pub struct ListUsersRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListUsersResponse { ///
The token to use to retrieve the next page of results. This value is `null` when there are no more results to return.
#[serde(rename = "NextToken")] @@ -759,7 +758,7 @@ pub struct ListUsersResponse { ///
The representation of a user or group.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Member { ///
The date indicating when the member was disabled from Amazon WorkMail use.
#[serde(rename = "DisabledDate")] @@ -789,7 +788,7 @@ pub struct Member { ///
The representation of an organization.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OrganizationSummary { ///
The alias associated with the organization.
#[serde(rename = "Alias")] @@ -811,7 +810,7 @@ pub struct OrganizationSummary { ///
Permission granted to a user, group, or resource to access a certain aspect of another user, group, or resource mailbox.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Permission { ///
The identifier of the user, group, or resource to which the permissions are granted.
#[serde(rename = "GranteeId")] @@ -841,7 +840,7 @@ pub struct PutMailboxPermissionsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutMailboxPermissionsResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -858,7 +857,7 @@ pub struct RegisterToWorkMailRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RegisterToWorkMailResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -875,12 +874,12 @@ pub struct ResetPasswordRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResetPasswordResponse {} ///
The representation of a resource.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Resource { ///
The date indicating when the resource was disabled from Amazon WorkMail use.
#[serde(rename = "DisabledDate")] @@ -926,7 +925,7 @@ pub struct UpdateMailboxQuotaRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateMailboxQuotaResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -943,7 +942,7 @@ pub struct UpdatePrimaryEmailAddressRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdatePrimaryEmailAddressResponse {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -965,12 +964,12 @@ pub struct UpdateResourceRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateResourceResponse {} ///
The representation of an Amazon WorkMail user.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct User { ///
The date indicating when the user was disabled from Amazon WorkMail use.
#[serde(rename = "DisabledDate")] @@ -3411,10 +3410,7 @@ impl WorkmailClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WorkmailClient { - WorkmailClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -3428,10 +3424,14 @@ impl WorkmailClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WorkmailClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WorkmailClient { + WorkmailClient { client, region } } } diff --git a/rusoto/services/workspaces/Cargo.toml b/rusoto/services/workspaces/Cargo.toml index 688fa6d23e1..32dbde56920 100644 --- a/rusoto/services/workspaces/Cargo.toml +++ b/rusoto/services/workspaces/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_workspaces" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/workspaces/README.md b/rusoto/services/workspaces/README.md index 4c3d74395cf..d714555f635 100644 --- a/rusoto/services/workspaces/README.md +++ b/rusoto/services/workspaces/README.md @@ -23,9 +23,16 @@ To use `rusoto_workspaces` in your application, add it as a dependency in your ` ```toml [dependencies] -rusoto_workspaces = "0.40.0" +rusoto_workspaces = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/workspaces/src/custom/mod.rs b/rusoto/services/workspaces/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/workspaces/src/custom/mod.rs +++ b/rusoto/services/workspaces/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/workspaces/src/generated.rs b/rusoto/services/workspaces/src/generated.rs index 54b31144c52..9eb2af7ef65 100644 --- a/rusoto/services/workspaces/src/generated.rs +++ b/rusoto/services/workspaces/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///
Describes a modification to the configuration of bring your own license (BYOL) for the specified account.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AccountModification { ///
The IP address range, specified as an IPv4 CIDR block, for the management network interface used for the account.
#[serde(rename = "DedicatedTenancyManagementCidrRange")] @@ -65,7 +64,7 @@ pub struct AssociateIpGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AssociateIpGroupsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -79,7 +78,7 @@ pub struct AuthorizeIpRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AuthorizeIpRulesResult {} ///
Describes an Amazon WorkSpaces client.
@@ -93,7 +92,7 @@ pub struct ClientProperties { ///
Information about the Amazon WorkSpaces client.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ClientPropertiesResult { ///
Information about the Amazon WorkSpaces client.
#[serde(rename = "ClientProperties")] @@ -107,7 +106,7 @@ pub struct ClientPropertiesResult { ///
Describes the compute type.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ComputeType { ///
The compute type.
#[serde(rename = "Name")] @@ -115,6 +114,36 @@ pub struct ComputeType { pub name: Option, } +#[derive(Default, Debug, Clone, PartialEq, Serialize)] +pub struct CopyWorkspaceImageRequest { + ///
A description of the image.
+ #[serde(rename = "Description")] + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + ///
The name of the image.
+ #[serde(rename = "Name")] + pub name: String, + ///
The identifier of the source image.
+ #[serde(rename = "SourceImageId")] + pub source_image_id: String, + ///
The identifier of the source Region.
+ #[serde(rename = "SourceRegion")] + pub source_region: String, + ///
The tags for the image.
+ #[serde(rename = "Tags")] + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +#[derive(Default, Debug, Clone, PartialEq, Deserialize)] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] +pub struct CopyWorkspaceImageResult { + ///
The identifier of the image.
+ #[serde(rename = "ImageId")] + #[serde(skip_serializing_if = "Option::is_none")] + pub image_id: Option, +} + #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateIpGroupRequest { ///
The description of the group.
@@ -135,7 +164,7 @@ pub struct CreateIpGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateIpGroupResult { ///
The identifier of the group.
#[serde(rename = "GroupId")] @@ -154,7 +183,7 @@ pub struct CreateTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateTagsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -165,7 +194,7 @@ pub struct CreateWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateWorkspacesResult { ///
Information about the WorkSpaces that could not be created.
#[serde(rename = "FailedRequests")] @@ -179,7 +208,7 @@ pub struct CreateWorkspacesResult { ///
Describes the default values used to create a WorkSpace.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DefaultWorkspaceCreationProperties { ///
The identifier of any security groups to apply to WorkSpaces when they are created.
#[serde(rename = "CustomSecurityGroupId")] @@ -211,7 +240,7 @@ pub struct DeleteIpGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteIpGroupResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -225,7 +254,7 @@ pub struct DeleteTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteTagsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -236,7 +265,7 @@ pub struct DeleteWorkspaceImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteWorkspaceImageResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -248,7 +277,7 @@ pub struct DescribeAccountModificationsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAccountModificationsResult { ///
The list of modifications to the configuration of BYOL.
#[serde(rename = "AccountModifications")] @@ -264,7 +293,7 @@ pub struct DescribeAccountModificationsResult { pub struct DescribeAccountRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeAccountResult { ///
The IP address range, specified as an IPv4 CIDR block, used for the management network interface. The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.
#[serde(rename = "DedicatedTenancyManagementCidrRange")] @@ -284,7 +313,7 @@ pub struct DescribeClientPropertiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeClientPropertiesResult { ///
Information about the specified Amazon WorkSpaces clients.
#[serde(rename = "ClientPropertiesList")] @@ -309,7 +338,7 @@ pub struct DescribeIpGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeIpGroupsResult { ///
The token to use to retrieve the next set of results, or null if no more results are available.
#[serde(rename = "NextToken")] @@ -329,7 +358,7 @@ pub struct DescribeTagsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeTagsResult { ///
The tags.
#[serde(rename = "TagList")] @@ -354,7 +383,7 @@ pub struct DescribeWorkspaceBundlesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkspaceBundlesResult { ///
Information about the bundles.
#[serde(rename = "Bundles")] @@ -379,7 +408,7 @@ pub struct DescribeWorkspaceDirectoriesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkspaceDirectoriesResult { ///
Information about the directories.
#[serde(rename = "Directories")] @@ -408,7 +437,7 @@ pub struct DescribeWorkspaceImagesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkspaceImagesResult { ///
Information about the images.
#[serde(rename = "Images")] @@ -433,7 +462,7 @@ pub struct DescribeWorkspacesConnectionStatusRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkspacesConnectionStatusResult { ///
The token to use to retrieve the next set of results, or null if no more results are available.
#[serde(rename = "NextToken")] @@ -474,7 +503,7 @@ pub struct DescribeWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DescribeWorkspacesResult { ///
The token to use to retrieve the next set of results, or null if no more results are available.
#[serde(rename = "NextToken")] @@ -497,12 +526,12 @@ pub struct DisassociateIpGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DisassociateIpGroupsResult {} ///
Describes a WorkSpace that cannot be created.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedCreateWorkspaceRequest { ///
The error code that is returned if the WorkSpace cannot be created.
#[serde(rename = "ErrorCode")] @@ -520,7 +549,7 @@ pub struct FailedCreateWorkspaceRequest { ///
Describes a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FailedWorkspaceChangeRequest { ///
The error code that is returned if the WorkSpace cannot be rebooted.
#[serde(rename = "ErrorCode")] @@ -557,7 +586,7 @@ pub struct ImportWorkspaceImageRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ImportWorkspaceImageResult { ///
The identifier of the WorkSpace image.
#[serde(rename = "ImageId")] @@ -594,7 +623,7 @@ pub struct ListAvailableManagementCidrRangesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ListAvailableManagementCidrRangesResult { ///
The list of available IP address ranges, specified as IPv4 CIDR blocks.
#[serde(rename = "ManagementCidrRanges")] @@ -608,7 +637,7 @@ pub struct ListAvailableManagementCidrRangesResult { ///
Describes a WorkSpace modification.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModificationState { ///
The resource.
#[serde(rename = "Resource")] @@ -633,7 +662,7 @@ pub struct ModifyAccountRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyAccountResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -647,7 +676,7 @@ pub struct ModifyClientPropertiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyClientPropertiesResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -661,7 +690,7 @@ pub struct ModifyWorkspacePropertiesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyWorkspacePropertiesResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -675,12 +704,12 @@ pub struct ModifyWorkspaceStateRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ModifyWorkspaceStateResult {} ///
The operating system that the image is running.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct OperatingSystem { ///
The operating system.
#[serde(rename = "Type")] @@ -704,7 +733,7 @@ pub struct RebootWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebootWorkspacesResult { ///
Information about the WorkSpaces that could not be rebooted.
#[serde(rename = "FailedRequests")] @@ -722,17 +751,13 @@ pub struct RebuildRequest { #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RebuildWorkspacesRequest { - ///
Reserved.
- #[serde(rename = "AdditionalInfo")] - #[serde(skip_serializing_if = "Option::is_none")] - pub additional_info: Option, ///
The WorkSpace to rebuild. You can specify a single WorkSpace.
#[serde(rename = "RebuildWorkspaceRequests")] pub rebuild_workspace_requests: Vec, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RebuildWorkspacesResult { ///
Information about the WorkSpace that could not be rebuilt.
#[serde(rename = "FailedRequests")] @@ -751,12 +776,12 @@ pub struct RevokeIpRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RevokeIpRulesResult {} ///
Describes the root volume for a WorkSpace bundle.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RootStorage { ///
The size of the root volume.
#[serde(rename = "Capacity")] @@ -781,7 +806,7 @@ pub struct StartWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StartWorkspacesResult { ///
Information about the WorkSpaces that could not be started.
#[serde(rename = "FailedRequests")] @@ -806,7 +831,7 @@ pub struct StopWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct StopWorkspacesResult { ///
Information about the WorkSpaces that could not be stopped.
#[serde(rename = "FailedRequests")] @@ -842,7 +867,7 @@ pub struct TerminateWorkspacesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TerminateWorkspacesResult { ///
Information about the WorkSpaces that could not be terminated.
#[serde(rename = "FailedRequests")] @@ -861,12 +886,12 @@ pub struct UpdateRulesOfIpGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateRulesOfIpGroupResult {} ///
Describes the user storage for a WorkSpace bundle.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UserStorage { ///
The size of the user storage.
#[serde(rename = "Capacity")] @@ -876,7 +901,7 @@ pub struct UserStorage { ///
Describes a WorkSpace.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Workspace { ///
The identifier of the bundle used to create the WorkSpace.
#[serde(rename = "BundleId")] @@ -942,7 +967,7 @@ pub struct Workspace { ///
Describes a WorkSpace bundle.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkspaceBundle { ///
The bundle identifier.
#[serde(rename = "BundleId")] @@ -976,7 +1001,7 @@ pub struct WorkspaceBundle { ///
Describes the connection status of a WorkSpace.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkspaceConnectionStatus { ///
The connection state of the WorkSpace. The connection state is unknown if the WorkSpace is stopped.
#[serde(rename = "ConnectionState")] @@ -998,7 +1023,7 @@ pub struct WorkspaceConnectionStatus { ///
Describes an AWS Directory Service directory that is used with Amazon WorkSpaces.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkspaceDirectory { ///
The directory alias.
#[serde(rename = "Alias")] @@ -1056,7 +1081,7 @@ pub struct WorkspaceDirectory { ///
Describes a WorkSpace image.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkspaceImage { ///
The description of the image.
#[serde(rename = "Description")] @@ -1153,7 +1178,7 @@ pub struct WorkspaceRequest { ///
Describes an IP access control group.
#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct WorkspacesIpGroup { ///
The description of the group.
#[serde(rename = "groupDesc")] @@ -1307,6 +1332,85 @@ impl Error for AuthorizeIpRulesError { } } } +/// Errors returned by CopyWorkspaceImage +#[derive(Debug, PartialEq)] +pub enum CopyWorkspaceImageError { + ///
The user is not authorized to access a resource.
+ AccessDenied(String), + ///
One or more parameter values are not valid.
+ InvalidParameterValues(String), + ///
This operation is not supported.
+ OperationNotSupported(String), + ///
The specified resource already exists.
+ ResourceAlreadyExists(String), + ///
Your resource limits have been exceeded.
+ ResourceLimitExceeded(String), + ///
The resource could not be found.
+ ResourceNotFound(String), + ///
The specified resource is not available.
+ ResourceUnavailable(String), +} + +impl CopyWorkspaceImageError { + pub fn from_response(res: BufferedHttpResponse) -> RusotoError { + if let Some(err) = proto::json::Error::parse(&res) { + match err.typ.as_str() { + "AccessDeniedException" => { + return RusotoError::Service(CopyWorkspaceImageError::AccessDenied(err.msg)) + } + "InvalidParameterValuesException" => { + return RusotoError::Service(CopyWorkspaceImageError::InvalidParameterValues( + err.msg, + )) + } + "OperationNotSupportedException" => { + return RusotoError::Service(CopyWorkspaceImageError::OperationNotSupported( + err.msg, + )) + } + "ResourceAlreadyExistsException" => { + return RusotoError::Service(CopyWorkspaceImageError::ResourceAlreadyExists( + err.msg, + )) + } + "ResourceLimitExceededException" => { + return RusotoError::Service(CopyWorkspaceImageError::ResourceLimitExceeded( + err.msg, + )) + } + "ResourceNotFoundException" => { + return RusotoError::Service(CopyWorkspaceImageError::ResourceNotFound(err.msg)) + } + "ResourceUnavailableException" => { + return RusotoError::Service(CopyWorkspaceImageError::ResourceUnavailable( + err.msg, + )) + } + "ValidationException" => return RusotoError::Validation(err.msg), + _ => {} + } + } + return RusotoError::Unknown(res); + } +} +impl fmt::Display for CopyWorkspaceImageError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +impl Error for CopyWorkspaceImageError { + fn description(&self) -> &str { + match *self { + CopyWorkspaceImageError::AccessDenied(ref cause) => cause, + CopyWorkspaceImageError::InvalidParameterValues(ref cause) => cause, + CopyWorkspaceImageError::OperationNotSupported(ref cause) => cause, + CopyWorkspaceImageError::ResourceAlreadyExists(ref cause) => cause, + CopyWorkspaceImageError::ResourceLimitExceeded(ref cause) => cause, + CopyWorkspaceImageError::ResourceNotFound(ref cause) => cause, + CopyWorkspaceImageError::ResourceUnavailable(ref cause) => cause, + } + } +} /// Errors returned by CreateIpGroup #[derive(Debug, PartialEq)] pub enum CreateIpGroupError { @@ -2653,6 +2757,12 @@ pub trait Workspaces { input: AuthorizeIpRulesRequest, ) -> RusotoFuture; + ///
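The generated `from_response` above maps the service's JSON exception names onto typed enum variants, routes `ValidationException` to `RusotoError::Validation`, and preserves anything unrecognized as `RusotoError::Unknown`. A minimal sketch of matching on those variants, assuming a caller that already holds a `RusotoError<CopyWorkspaceImageError>`; the `describe` helper is hypothetical:

```rust
use rusoto_core::RusotoError;
use rusoto_workspaces::CopyWorkspaceImageError;

// Hypothetical helper: turn the typed error into a log-friendly string.
fn describe(err: &RusotoError<CopyWorkspaceImageError>) -> String {
    match err {
        RusotoError::Service(CopyWorkspaceImageError::ResourceNotFound(msg)) => {
            format!("source image not found: {}", msg)
        }
        RusotoError::Service(CopyWorkspaceImageError::ResourceLimitExceeded(msg)) => {
            format!("image quota exhausted: {}", msg)
        }
        RusotoError::Validation(msg) => format!("request failed validation: {}", msg),
        // Unknown responses keep the raw BufferedHttpResponse for inspection.
        other => format!("copy failed: {}", other),
    }
}
```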
Copies the specified image from the specified Region to the current Region.
+ fn copy_workspace_image( + &self, + input: CopyWorkspaceImageRequest, + ) -> RusotoFuture; + ///
Creates an IP access control group. An IP access control group provides you with the ability to control the IP addresses from which users are allowed to access their WorkSpaces. To specify the CIDR address ranges, add rules to your IP access control group and then associate the group with your directory. You can add rules when you create the group or at any time using AuthorizeIpRules. There is a default IP access control group associated with your directory. If you don't associate an IP access control group with your directory, the default group is used. The default group includes a default rule that allows users to access their WorkSpaces from anywhere. You cannot modify the default IP access control group for your directory.
fn create_ip_group( &self, @@ -2683,7 +2793,7 @@ pub trait Workspaces { input: DeleteTagsRequest, ) -> RusotoFuture; - ///
Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image.
+ ///
Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image and un-share the image if it is shared with other accounts.
fn delete_workspace_image( &self, input: DeleteWorkspaceImageRequest, @@ -2785,7 +2895,7 @@ pub trait Workspaces { input: ModifyWorkspacePropertiesRequest, ) -> RusotoFuture; - ///
Sets the state of the specified WorkSpace. To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to reboot, stop, start, or rebuild. An AutoStop WorkSpace in this state is not stopped. Users can log into a WorkSpace in the ADMIN_MAINTENANCE state.
+ ///
Sets the state of the specified WorkSpace. To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE state.
fn modify_workspace_state( &self, input: ModifyWorkspaceStateRequest, @@ -2845,10 +2955,7 @@ impl WorkspacesClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> WorkspacesClient { - WorkspacesClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2862,10 +2969,14 @@ impl WorkspacesClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - WorkspacesClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> WorkspacesClient { + WorkspacesClient { client, region } } } @@ -2928,6 +3039,35 @@ impl Workspaces for WorkspacesClient { }) } + ///
Copies the specified image from the specified Region to the current Region.
+ fn copy_workspace_image( + &self, + input: CopyWorkspaceImageRequest, + ) -> RusotoFuture { + let mut request = SignedRequest::new("POST", "workspaces", &self.region, "/"); + + request.set_content_type("application/x-amz-json-1.1".to_owned()); + request.add_header("x-amz-target", "WorkspacesService.CopyWorkspaceImage"); + let encoded = serde_json::to_string(&input).unwrap(); + request.set_payload(Some(encoded)); + + self.client.sign_and_dispatch(request, |response| { + if response.status.is_success() { + Box::new(response.buffer().from_err().and_then(|response| { + proto::json::ResponsePayload::new(&response) + .deserialize::() + })) + } else { + Box::new( + response + .buffer() + .from_err() + .and_then(|response| Err(CopyWorkspaceImageError::from_response(response))), + ) + } + }) + } + ///
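The new operation follows the same `sign_and_dispatch` pattern as the rest of the client: an `x-amz-json-1.1` POST whose payload is the serialized request. A minimal sketch of calling it, with hypothetical image and Region identifiers in place of real ones, resolving the returned `RusotoFuture` synchronously via `.sync()`:

```rust
use rusoto_core::Region;
use rusoto_workspaces::{CopyWorkspaceImageRequest, Workspaces, WorkspacesClient};

fn main() {
    let client = WorkspacesClient::new(Region::UsWest2);
    let request = CopyWorkspaceImageRequest {
        name: "copied-image".to_string(),                     // hypothetical target name
        source_image_id: "wsi-0123456789abcdef0".to_string(), // hypothetical source image
        source_region: "us-east-1".to_string(),
        ..Default::default() // description and tags stay None
    };
    match client.copy_workspace_image(request).sync() {
        Ok(result) => println!("new image id: {:?}", result.image_id),
        Err(err) => eprintln!("copy failed: {}", err),
    }
}
```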
Creates an IP access control group. An IP access control group provides you with the ability to control the IP addresses from which users are allowed to access their WorkSpaces. To specify the CIDR address ranges, add rules to your IP access control group and then associate the group with your directory. You can add rules when you create the group or at any time using AuthorizeIpRules. There is a default IP access control group associated with your directory. If you don't associate an IP access control group with your directory, the default group is used. The default group includes a default rule that allows users to access their WorkSpaces from anywhere. You cannot modify the default IP access control group for your directory.
fn create_ip_group( &self, @@ -3073,7 +3213,7 @@ impl Workspaces for WorkspacesClient { }) } - ///
Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image.
+ ///
Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image and un-share the image if it is shared with other accounts.
fn delete_workspace_image( &self, input: DeleteWorkspaceImageRequest, @@ -3557,7 +3697,7 @@ impl Workspaces for WorkspacesClient { }) } - ///
Sets the state of the specified WorkSpace. To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to reboot, stop, start, or rebuild. An AutoStop WorkSpace in this state is not stopped. Users can log into a WorkSpace in the ADMIN_MAINTENANCE state.
+ ///
Sets the state of the specified WorkSpace. To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE state.
fn modify_workspace_state( &self, input: ModifyWorkspaceStateRequest, diff --git a/rusoto/services/xray/Cargo.toml b/rusoto/services/xray/Cargo.toml index 22b3190c925..bee7f4e92ae 100644 --- a/rusoto/services/xray/Cargo.toml +++ b/rusoto/services/xray/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "rusoto_xray" readme = "README.md" repository = "https://github.com/rusoto/rusoto" -version = "0.40.0" +version = "0.41.0" homepage = "https://www.rusoto.org/" edition = "2018" exclude = ["test_resources/*"] @@ -22,14 +22,16 @@ serde_derive = "1.0.2" serde_json = "1.0.1" [dependencies.rusoto_core] -version = "0.40.0" +version = "0.41.0" path = "../../core" default-features = false [dev-dependencies.rusoto_mock] -version = "0.40.0" +version = "0.41.0" path = "../../../mock" +default-features = false [features] default = ["native-tls"] native-tls = ["rusoto_core/native-tls"] rustls = ["rusoto_core/rustls"] +serialize_structs = [] diff --git a/rusoto/services/xray/README.md b/rusoto/services/xray/README.md index 9dcb451dce3..bb7ea64a429 100644 --- a/rusoto/services/xray/README.md +++ b/rusoto/services/xray/README.md @@ -23,9 +23,16 @@ To use `rusoto_xray` in your application, add it as a dependency in your `Cargo. ```toml [dependencies] -rusoto_xray = "0.40.0" +rusoto_xray = "0.41.0" ``` +## Crate Features +- `native-tls` - use platform-specific TLS implementation. +- `rustls` - use rustls TLS implementation. +- `serialize_structs` - output structs of most operations get `derive(Serialize)`. + +Note: the crate will use the `native-tls` TLS implementation by default. + ## Contributing See [CONTRIBUTING][contributing]. diff --git a/rusoto/services/xray/src/custom/mod.rs b/rusoto/services/xray/src/custom/mod.rs index e69de29bb2d..8b137891791 100644 --- a/rusoto/services/xray/src/custom/mod.rs +++ b/rusoto/services/xray/src/custom/mod.rs @@ -0,0 +1 @@ + diff --git a/rusoto/services/xray/src/generated.rs b/rusoto/services/xray/src/generated.rs index 92721525f2f..6c69c751f1d 100644 --- a/rusoto/services/xray/src/generated.rs +++ b/rusoto/services/xray/src/generated.rs @@ -9,24 +9,23 @@ // must be updated to generate the changes. // // ================================================================= +#![allow(warnings)] -use std::error::Error; -use std::fmt; - -#[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError, RusotoFuture}; +use std::error::Error; +use std::fmt; use rusoto_core::proto; use rusoto_core::signature::SignedRequest; use serde_json; ///

An alias for an edge.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Alias { ///

The canonical name of the alias.

#[serde(rename = "Name")] @@ -44,7 +43,7 @@ pub struct Alias { ///

Value of a segment annotation. Has one of three value types: Number, Boolean or String.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AnnotationValue { ///

Value for a Boolean annotation.

#[serde(rename = "BooleanValue")] @@ -62,7 +61,7 @@ pub struct AnnotationValue { ///

A list of availability zones corresponding to the segments in a trace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct AvailabilityZoneDetail { ///

The name of a corresponding availability zone.

#[serde(rename = "Name")] @@ -111,7 +110,7 @@ pub struct BatchGetTracesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct BatchGetTracesResult { ///

Pagination token. Not used.

#[serde(rename = "NextToken")] @@ -139,7 +138,7 @@ pub struct CreateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateGroupResult { ///

The group that was created. Contains the name of the group that was created, the ARN of the group that was generated based on the group name, and the filter expression that was assigned to the group.

#[serde(rename = "Group")] @@ -155,7 +154,7 @@ pub struct CreateSamplingRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct CreateSamplingRuleResult { ///

The saved rule definition and metadata.

#[serde(rename = "SamplingRuleRecord")] @@ -176,7 +175,7 @@ pub struct DeleteGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteGroupResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -192,7 +191,7 @@ pub struct DeleteSamplingRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct DeleteSamplingRuleResult { ///

The deleted rule definition and metadata.

#[serde(rename = "SamplingRuleRecord")] @@ -202,7 +201,7 @@ pub struct DeleteSamplingRuleResult { ///

Information about a connection between two services.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Edge { ///

Aliases for the edge.

#[serde(rename = "Aliases")] @@ -232,7 +231,7 @@ pub struct Edge { ///

Response statistics for an edge.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EdgeStatistics { ///

Information about requests that failed with a 4xx Client Error status code.

#[serde(rename = "ErrorStatistics")] @@ -258,7 +257,7 @@ pub struct EdgeStatistics { ///

A configuration document that specifies encryption configuration settings.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct EncryptionConfig { ///

The ID of the customer master key (CMK) used for encryption, if applicable.

#[serde(rename = "KeyId")] @@ -276,7 +275,7 @@ pub struct EncryptionConfig { ///

The root cause of a trace summary error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorRootCause { ///

A list of services corresponding to an error. A service identifies a segment and it contains a name, account ID, type, and inferred flag.

#[serde(rename = "Services")] @@ -286,7 +285,7 @@ pub struct ErrorRootCause { ///

A collection of segments and corresponding subsegments associated to a trace summary error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorRootCauseEntity { ///

The types and messages of the exceptions.

#[serde(rename = "Exceptions")] @@ -304,7 +303,7 @@ pub struct ErrorRootCauseEntity { ///

A collection of fields identifying the services in a trace summary error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorRootCauseService { ///

The account ID associated to the service.

#[serde(rename = "AccountId")] @@ -334,7 +333,7 @@ pub struct ErrorRootCauseService { ///

Information about requests that failed with a 4xx Client Error status code.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ErrorStatistics { ///

The number of requests that failed with untracked 4xx Client Error status codes.

#[serde(rename = "OtherCount")] @@ -352,7 +351,7 @@ pub struct ErrorStatistics { ///

The root cause information for a trace summary fault.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaultRootCause { ///

A list of corresponding services. A service identifies a segment and it contains a name, account ID, type, and inferred flag.

#[serde(rename = "Services")] @@ -362,7 +361,7 @@ pub struct FaultRootCause { ///

A collection of segments and corresponding subsegments associated to a trace summary fault error.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaultRootCauseEntity { ///

The types and messages of the exceptions.

#[serde(rename = "Exceptions")] @@ -380,7 +379,7 @@ pub struct FaultRootCauseEntity { ///

A collection of fields identifying the services in a trace summary fault.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaultRootCauseService { ///

The account ID associated to the service.

#[serde(rename = "AccountId")] @@ -410,7 +409,7 @@ pub struct FaultRootCauseService { ///

Information about requests that failed with a 5xx Server Error status code.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct FaultStatistics { ///

The number of requests that failed with untracked 5xx Server Error status codes.

#[serde(rename = "OtherCount")] @@ -426,7 +425,7 @@ pub struct FaultStatistics { pub struct GetEncryptionConfigRequest {} #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetEncryptionConfigResult { ///

The encryption configuration document.

#[serde(rename = "EncryptionConfig")] @@ -447,7 +446,7 @@ pub struct GetGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGroupResult { ///

The group that was requested. Contains the name of the group, the ARN of the group, and the filter expression that was assigned to the group.

#[serde(rename = "Group")] @@ -464,7 +463,7 @@ pub struct GetGroupsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetGroupsResult { ///

The collection of all active groups.

#[serde(rename = "Groups")] @@ -485,7 +484,7 @@ pub struct GetSamplingRulesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSamplingRulesResult { ///

Pagination token. Not used.

#[serde(rename = "NextToken")] @@ -506,7 +505,7 @@ pub struct GetSamplingStatisticSummariesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSamplingStatisticSummariesResult { ///

Pagination token. Not used.

#[serde(rename = "NextToken")] @@ -526,7 +525,7 @@ pub struct GetSamplingTargetsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetSamplingTargetsResult { ///

The last time a user changed the sampling rule configuration. If the sampling rule configuration changed since the service last retrieved it, the service should call GetSamplingRules to get the latest version.

#[serde(rename = "LastRuleModification")] @@ -565,7 +564,7 @@ pub struct GetServiceGraphRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetServiceGraphResult { ///

A flag indicating whether the group's filter expression has been consistent, or if the returned service graph may show traces from an older version of the group's filter expression.

#[serde(rename = "ContainsOldGroupVersions")] @@ -620,7 +619,7 @@ pub struct GetTimeSeriesServiceStatisticsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTimeSeriesServiceStatisticsResult { ///

A flag indicating whether or not a group's filter expression has been consistent, or if a returned aggregation may show statistics from an older version of the group's filter expression.

#[serde(rename = "ContainsOldGroupVersions")] @@ -648,7 +647,7 @@ pub struct GetTraceGraphRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTraceGraphResult { ///

Pagination token. Not used.

#[serde(rename = "NextToken")] @@ -691,7 +690,7 @@ pub struct GetTraceSummariesRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GetTraceSummariesResult { ///

The start time of this page of results.

#[serde(rename = "ApproximateTime")] @@ -713,7 +712,7 @@ pub struct GetTraceSummariesResult { ///

Details and metadata for a group.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Group { ///

The filter expression defining the parameters to include traces.

#[serde(rename = "FilterExpression")] @@ -731,7 +730,7 @@ pub struct Group { ///

Details for a group without metadata.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct GroupSummary { ///

The filter expression defining the parameters to include traces.

#[serde(rename = "FilterExpression")] @@ -749,7 +748,7 @@ pub struct GroupSummary { ///

An entry in a histogram for a statistic. A histogram maps the range of observed values on the X axis, and the prevalence of each value on the Y axis.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct HistogramEntry { ///

The prevalence of the entry.

#[serde(rename = "Count")] @@ -763,7 +762,7 @@ pub struct HistogramEntry { ///

Information about an HTTP request.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Http { ///

The IP address of the requestor.

#[serde(rename = "ClientIp")] @@ -789,7 +788,7 @@ pub struct Http { ///

A list of EC2 instance IDs corresponding to the segments in a trace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct InstanceIdDetail { ///

The ID of a corresponding EC2 instance.

#[serde(rename = "Id")] @@ -809,7 +808,7 @@ pub struct PutEncryptionConfigRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutEncryptionConfigResult { ///

The new encryption configuration.

#[serde(rename = "EncryptionConfig")] @@ -837,7 +836,7 @@ pub struct PutTelemetryRecordsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutTelemetryRecordsResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] @@ -848,7 +847,7 @@ pub struct PutTraceSegmentsRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct PutTraceSegmentsResult { ///

Segments that failed processing.

#[serde(rename = "UnprocessedTraceSegments")] @@ -858,7 +857,7 @@ pub struct PutTraceSegmentsResult { ///

A list of resource ARNs corresponding to the segments in a trace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResourceARNDetail { ///

The ARN of a corresponding resource.

#[serde(rename = "ARN")] @@ -868,7 +867,7 @@ pub struct ResourceARNDetail { ///

The root cause information for a response time warning.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResponseTimeRootCause { ///

A list of corresponding services. A service identifies a segment and contains a name, account ID, type, and inferred flag.

#[serde(rename = "Services")] @@ -878,7 +877,7 @@ pub struct ResponseTimeRootCause { ///

A collection of segments and corresponding subsegments associated to a response time warning.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResponseTimeRootCauseEntity { ///

The types and messages of the exceptions.

#[serde(rename = "Coverage")] @@ -896,7 +895,7 @@ pub struct ResponseTimeRootCauseEntity { ///

A collection of fields identifying the service in a response time warning.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ResponseTimeRootCauseService { ///

The account ID associated to the service.

#[serde(rename = "AccountId")] @@ -926,7 +925,7 @@ pub struct ResponseTimeRootCauseService { ///

The exception associated with a root cause.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct RootCauseException { ///

The message of the exception.

#[serde(rename = "Message")] @@ -987,7 +986,7 @@ pub struct SamplingRule { ///

A SamplingRule and its metadata.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SamplingRuleRecord { ///

When the rule was created.

#[serde(rename = "CreatedAt")] @@ -1058,7 +1057,7 @@ pub struct SamplingRuleUpdate { ///

Aggregated request sampling data for a sampling rule across all services for a 10 second window.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SamplingStatisticSummary { ///

The number of requests recorded with borrowed reservoir quota.

#[serde(rename = "BorrowCount")] @@ -1121,7 +1120,7 @@ pub struct SamplingStrategy { ///

Temporary changes to a sampling rule configuration. To meet the global sampling target for a rule, X-Ray calculates a new reservoir for each service based on the recent sampling results of all services that called GetSamplingTargets.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct SamplingTargetDocument { ///

The percentage of matching requests to instrument, after the reservoir is exhausted.

#[serde(rename = "FixedRate")] @@ -1147,7 +1146,7 @@ pub struct SamplingTargetDocument { ///

A segment from a trace that has been ingested by the X-Ray service. The segment can be compiled from documents uploaded with PutTraceSegments, or an inferred segment for a downstream service, generated from a subsegment sent by the service that called it.

For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Segment { ///

The segment document.

#[serde(rename = "Document")] @@ -1161,7 +1160,7 @@ pub struct Segment { ///

Information about an application that processed requests, users that made requests, or downstream services, resources and applications that an application used.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Service { ///

Identifier of the AWS account in which the service runs.

#[serde(rename = "AccountId")] @@ -1219,7 +1218,7 @@ pub struct Service { ///

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceId { ///

#[serde(rename = "AccountId")] @@ -1241,7 +1240,7 @@ pub struct ServiceId { ///

Response statistics for a service.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ServiceStatistics { ///

Information about requests that failed with a 4xx Client Error status code.

#[serde(rename = "ErrorStatistics")] @@ -1295,7 +1294,7 @@ pub struct TelemetryRecord { ///

A list of TimeSeriesStatistic structures.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TimeSeriesServiceStatistics { #[serde(rename = "EdgeSummaryStatistics")] #[serde(skip_serializing_if = "Option::is_none")] @@ -1315,7 +1314,7 @@ pub struct TimeSeriesServiceStatistics { ///

A collection of segment documents with matching trace IDs.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct Trace { ///

The length of time in seconds between the start time of the root segment and the end time of the last segment that completed.

#[serde(rename = "Duration")] @@ -1333,7 +1332,7 @@ pub struct Trace { ///

Metadata generated from the segment documents in a trace.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TraceSummary { ///

Annotations from the trace's segment documents.

#[serde(rename = "Annotations")] @@ -1419,7 +1418,7 @@ pub struct TraceSummary { ///

Information about a user recorded in segment documents.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct TraceUser { ///

Services that the user's request hit.

#[serde(rename = "ServiceIds")] @@ -1433,7 +1432,7 @@ pub struct TraceUser { ///

Sampling statistics from a call to GetSamplingTargets that X-Ray could not process.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedStatistics { ///

The error code.

#[serde(rename = "ErrorCode")] @@ -1451,7 +1450,7 @@ pub struct UnprocessedStatistics { ///

Information about a segment that failed processing.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UnprocessedTraceSegment { ///

The error that caused processing to fail.

#[serde(rename = "ErrorCode")] @@ -1484,7 +1483,7 @@ pub struct UpdateGroupRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateGroupResult { ///

The group that was updated. Contains the name of the group that was updated, the ARN of the group that was updated, and the updated filter expression assigned to the group.

#[serde(rename = "Group")] @@ -1500,7 +1499,7 @@ pub struct UpdateSamplingRuleRequest { } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct UpdateSamplingRuleResult { ///

The updated rule definition and metadata.

#[serde(rename = "SamplingRuleRecord")] @@ -1510,7 +1509,7 @@ pub struct UpdateSamplingRuleResult { ///

Information about a segment annotation.

#[derive(Default, Debug, Clone, PartialEq, Deserialize)] -#[cfg_attr(test, derive(Serialize))] +#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))] pub struct ValueWithServiceIds { ///

Values of the annotation.

#[serde(rename = "AnnotationValue")] @@ -2449,10 +2448,7 @@ impl XRayClient { /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> XRayClient { - XRayClient { - client: Client::shared(), - region, - } + Self::new_with_client(Client::shared(), region) } pub fn new_with( @@ -2466,10 +2462,14 @@ impl XRayClient { D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { - XRayClient { - client: Client::new_with(credentials_provider, request_dispatcher), + Self::new_with_client( + Client::new_with(credentials_provider, request_dispatcher), region, - } + ) + } + + pub fn new_with_client(client: Client, region: region::Region) -> XRayClient { + XRayClient { client, region } } } diff --git a/rusoto/signature/Cargo.toml b/rusoto/signature/Cargo.toml new file mode 100644 index 00000000000..a67c90e3b00 --- /dev/null +++ b/rusoto/signature/Cargo.toml @@ -0,0 +1,49 @@ +[package] +authors = [ + "Anthony DiMarco ", + "Jimmy Cuadra ", + "Matthew Mayer ", + "Nikita Pekin " +] +description = "AWS SDK for Rust - Request Signing" +documentation = "https://docs.rs/rusoto_signature" +keywords = ["AWS", "Amazon"] +license = "MIT" +name = "rusoto_signature" +readme = "README.md" +repository = "https://github.com/rusoto/rusoto" +version = "0.41.0" +homepage = "https://www.rusoto.org/" +edition = "2018" + +[badges] +appveyor = { repository = "matthewkmayer/rusoto", branch = "master" } +azure-devops = { project = "matthewkmayer/Rusoto", pipeline = "rusoto.rusoto", build="1" } + +[build-dependencies] +rustc_version = "0.2.1" + +[dependencies] +bytes = "0.4.12" +futures = "0.1.16" +hmac = "0.7.1" +http = "0.1.17" +hyper = "0.12" +log = "0.4.1" +md5 = "0.7" +base64 = "0.11" +hex = "0.4" +serde = "1.0.2" +sha2 = "0.8.0" +time = "0.1.35" +percent-encoding = "2.1.0" +tokio = "0.1.7" + +[dependencies.rusoto_credential] +path = "../credential" +version = "0.41" + +[dev-dependencies] +serde_json = "1.0.2" +serde_test = "1.0.1" + diff --git a/rusoto/signature/src/lib.rs b/rusoto/signature/src/lib.rs new file mode 100644 index 00000000000..e723111539a --- /dev/null +++ b/rusoto/signature/src/lib.rs @@ -0,0 +1,18 @@ +//! AWS API request signatures. +//! +//! Follows [AWS Signature 4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +//! algorithm. +//! +//! If needed, the request will be re-issued to a temporary redirect endpoint. This can happen with +//! newly created S3 buckets not in us-standard/us-east-1. +//! +//! Please note that this module does not expect URIs to already be encoded. +//! +#![cfg_attr(not(feature = "unstable"), deny(warnings))] +pub extern crate rusoto_credential as credential; +pub mod region; +pub mod signature; +pub mod stream; +pub use region::Region; +pub use signature::{SignedRequest, SignedRequestPayload}; +pub use stream::ByteStream; diff --git a/rusoto/signature/src/region.rs b/rusoto/signature/src/region.rs new file mode 100644 index 00000000000..3c647b6bd71 --- /dev/null +++ b/rusoto/signature/src/region.rs @@ -0,0 +1,413 @@ +//! AWS Regions and helper functions. +//! +//! Mostly used for translating the Region enum to a string AWS accepts. +//! +//! For example: `UsEast1` to "us-east-1" + +use crate::credential::ProfileProvider; +use serde::ser::SerializeTuple; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use std; +use std::error::Error; +use std::fmt::{self, Display, Error as FmtError, Formatter}; +use std::str::FromStr; + +/// An AWS region. 
+/// +/// # Default +/// +/// `Region` implements the `Default` trait. Calling `Region::default()` will attempt to read the +/// `AWS_DEFAULT_REGION` or `AWS_REGION` environment variable. If it is malformed, it will fall back to `Region::UsEast1`. +/// If it is not present it will fall back on the value associated with the current profile in `~/.aws/config` or the file +/// specified by the `AWS_CONFIG_FILE` environment variable. If that is malformed or absent it will fall back on `Region::UsEast1`. +/// +/// # AWS-compatible services +/// +/// `Region::Custom` can be used to connect to AWS-compatible services such as DynamoDB Local or Ceph. +/// +/// ``` +/// # use rusoto_signature::Region; +/// Region::Custom { +/// name: "eu-east-3".to_owned(), +/// endpoint: "http://localhost:8000".to_owned(), +/// }; +/// ``` +/// +/// # Caveats +/// +/// `CnNorth1` is currently untested due to Rusoto maintainers not having access to AWS China. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Region { + /// Region that covers the Eastern part of Asia Pacific + ApEast1, + + /// Region that covers the North-Eastern part of Asia Pacific + ApNortheast1, + + /// Region that covers the North-Eastern part of Asia Pacific + ApNortheast2, + + /// Region that covers the North-Eastern part of Asia Pacific + ApNortheast3, + + /// Region that covers the Southern part of Asia Pacific + ApSouth1, + + /// Region that covers the South-Eastern part of Asia Pacific + ApSoutheast1, + + /// Region that covers the South-Eastern part of Asia Pacific + ApSoutheast2, + + /// Region that covers Canada + CaCentral1, + + /// Region that covers Central Europe + EuCentral1, + + /// Region that covers Western Europe + EuWest1, + + /// Region that covers Western Europe + EuWest2, + + /// Region that covers Western Europe + EuWest3, + + /// Region that covers Northern Europe + EuNorth1, + + /// Bahrain, Middle East South + MeSouth1, + + /// Region that covers South America + SaEast1, + + /// Region that covers the Eastern part of the United States + UsEast1, + + /// Region that covers the Eastern part of the United States + UsEast2, + + /// Region that covers the Western part of the United States + UsWest1, + + /// Region that covers the Western part of the United States + UsWest2, + + /// Region that covers the Eastern part of the United States for the US Government + UsGovEast1, + + /// Region that covers the Western part of the United States for the US Government + UsGovWest1, + + /// Region that covers China + CnNorth1, + + /// Region that covers North-Western part of China + CnNorthwest1, + + /// Specifies a custom region, such as a local Ceph target + Custom { + /// Name of the endpoint (e.g. `"eu-east-2"`). + name: String, + + /// Endpoint to be used. For instance, `"https://s3.my-provider.net"` or just + /// `"s3.my-provider.net"` (default scheme is https).
+ endpoint: String, + }, +} + +impl Region { + /// Name of the region + /// + /// ``` + /// # use rusoto_signature::Region; + /// assert_eq!(Region::CaCentral1.name(), "ca-central-1"); + /// assert_eq!( + /// Region::Custom { name: "eu-east-3".to_owned(), endpoint: "s3.net".to_owned() }.name(), + /// "eu-east-3" + /// ); + /// ``` + pub fn name(&self) -> &str { + match *self { + Region::ApEast1 => "ap-east-1", + Region::ApNortheast1 => "ap-northeast-1", + Region::ApNortheast2 => "ap-northeast-2", + Region::ApNortheast3 => "ap-northeast-3", + Region::ApSouth1 => "ap-south-1", + Region::ApSoutheast1 => "ap-southeast-1", + Region::ApSoutheast2 => "ap-southeast-2", + Region::CaCentral1 => "ca-central-1", + Region::EuCentral1 => "eu-central-1", + Region::EuWest1 => "eu-west-1", + Region::EuWest2 => "eu-west-2", + Region::EuWest3 => "eu-west-3", + Region::EuNorth1 => "eu-north-1", + Region::MeSouth1 => "me-south-1", + Region::SaEast1 => "sa-east-1", + Region::UsEast1 => "us-east-1", + Region::UsEast2 => "us-east-2", + Region::UsWest1 => "us-west-1", + Region::UsWest2 => "us-west-2", + Region::UsGovEast1 => "us-gov-east-1", + Region::UsGovWest1 => "us-gov-west-1", + Region::CnNorth1 => "cn-north-1", + Region::CnNorthwest1 => "cn-northwest-1", + Region::Custom { ref name, .. } => name, + } + } +} + +/// An error produced when attempting to convert a `str` into a `Region` fails. +#[derive(Debug, PartialEq)] +pub struct ParseRegionError { + message: String, +} + +// Manually created for lack of a way to flatten the `Region::Custom` variant +// Related: https://github.com/serde-rs/serde/issues/119 +impl Serialize for Region { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let mut seq = serializer.serialize_tuple(2)?; + if let Region::Custom { + ref endpoint, + ref name, + } = *self + { + seq.serialize_element(&name)?; + seq.serialize_element(&Some(&endpoint))?; + } else { + seq.serialize_element(self.name())?; + seq.serialize_element(&None as &Option<&str>)?; + } + seq.end() + } +} + +struct RegionVisitor; + +impl<'de> de::Visitor<'de> for RegionVisitor { + type Value = Region; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("sequence of (name, Some(endpoint))") + } + + fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> + where + A: de::SeqAccess<'de>, + { + let name: String = seq + .next_element::<String>()?
.ok_or_else(|| de::Error::custom("region is missing name"))?; + let endpoint: Option<String> = seq.next_element::<Option<String>>()?.unwrap_or_default(); + match (name, endpoint) { + (name, Some(endpoint)) => Ok(Region::Custom { name, endpoint }), + (name, None) => name.parse().map_err(de::Error::custom), + } + } +} + +// Manually created for lack of a way to flatten the `Region::Custom` variant +// Related: https://github.com/serde-rs/serde/issues/119 +impl<'de> Deserialize<'de> for Region { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + deserializer.deserialize_tuple(2, RegionVisitor) + } +} + +impl FromStr for Region { + type Err = ParseRegionError; + + fn from_str(s: &str) -> Result<Region, Self::Err> { + let v: &str = &s.to_lowercase(); + match v { + "ap-east-1" | "apeast1" => Ok(Region::ApEast1), + "ap-northeast-1" | "apnortheast1" => Ok(Region::ApNortheast1), + "ap-northeast-2" | "apnortheast2" => Ok(Region::ApNortheast2), + "ap-northeast-3" | "apnortheast3" => Ok(Region::ApNortheast3), + "ap-south-1" | "apsouth1" => Ok(Region::ApSouth1), + "ap-southeast-1" | "apsoutheast1" => Ok(Region::ApSoutheast1), + "ap-southeast-2" | "apsoutheast2" => Ok(Region::ApSoutheast2), + "ca-central-1" | "cacentral1" => Ok(Region::CaCentral1), + "eu-central-1" | "eucentral1" => Ok(Region::EuCentral1), + "eu-west-1" | "euwest1" => Ok(Region::EuWest1), + "eu-west-2" | "euwest2" => Ok(Region::EuWest2), + "eu-west-3" | "euwest3" => Ok(Region::EuWest3), + "eu-north-1" | "eunorth1" => Ok(Region::EuNorth1), + "me-south-1" | "mesouth1" => Ok(Region::MeSouth1), + "sa-east-1" | "saeast1" => Ok(Region::SaEast1), + "us-east-1" | "useast1" => Ok(Region::UsEast1), + "us-east-2" | "useast2" => Ok(Region::UsEast2), + "us-west-1" | "uswest1" => Ok(Region::UsWest1), + "us-west-2" | "uswest2" => Ok(Region::UsWest2), + "us-gov-east-1" | "usgoveast1" => Ok(Region::UsGovEast1), + "us-gov-west-1" | "usgovwest1" => Ok(Region::UsGovWest1), + "cn-north-1" | "cnnorth1" => Ok(Region::CnNorth1), + "cn-northwest-1" | "cnnorthwest1" => Ok(Region::CnNorthwest1), + s => Err(ParseRegionError::new(s)), + } + } +} + +impl ParseRegionError { + /// Constructs a new error for a string that failed to parse into a `Region` + pub fn new(input: &str) -> Self { + ParseRegionError { + message: format!("Not a valid AWS region: {}", input), + } + } +} + +impl Error for ParseRegionError { + fn description(&self) -> &str { + &self.message + } +} + +impl Display for ParseRegionError { + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + write!(f, "{}", self.message) + } +} + +impl Default for Region { + fn default() -> Region { + match std::env::var("AWS_DEFAULT_REGION").or_else(|_| std::env::var("AWS_REGION")) { + Ok(ref v) => Region::from_str(v).unwrap_or(Region::UsEast1), + Err(_) => match ProfileProvider::region() { + Ok(Some(region)) => Region::from_str(&region).unwrap_or(Region::UsEast1), + _ => Region::UsEast1, + }, + } + } +} + +#[cfg(test)] +mod tests { + extern crate serde_test; + use self::serde_test::{assert_tokens, Token}; + use super::*; + + #[test] + fn from_str() { + assert_eq!( + "foo" + .parse::<Region>() + .err() + .expect("Parsing foo as a Region was not an error") + .to_string(), + "Not a valid AWS region: foo".to_owned() + ); + assert_eq!("ap-east-1".parse(), Ok(Region::ApEast1)); + assert_eq!("ap-northeast-1".parse(), Ok(Region::ApNortheast1)); + assert_eq!("ap-northeast-2".parse(), Ok(Region::ApNortheast2)); + assert_eq!("ap-northeast-3".parse(), Ok(Region::ApNortheast3)); + assert_eq!("ap-south-1".parse(), Ok(Region::ApSouth1)); +
assert_eq!("ap-southeast-1".parse(), Ok(Region::ApSoutheast1)); + assert_eq!("ap-southeast-2".parse(), Ok(Region::ApSoutheast2)); + assert_eq!("ca-central-1".parse(), Ok(Region::CaCentral1)); + assert_eq!("eu-central-1".parse(), Ok(Region::EuCentral1)); + assert_eq!("eu-west-1".parse(), Ok(Region::EuWest1)); + assert_eq!("eu-west-2".parse(), Ok(Region::EuWest2)); + assert_eq!("eu-west-3".parse(), Ok(Region::EuWest3)); + assert_eq!("eu-north-1".parse(), Ok(Region::EuNorth1)); + assert_eq!("me-south-1".parse(), Ok(Region::MeSouth1)); + assert_eq!("sa-east-1".parse(), Ok(Region::SaEast1)); + assert_eq!("us-east-1".parse(), Ok(Region::UsEast1)); + assert_eq!("us-east-2".parse(), Ok(Region::UsEast2)); + assert_eq!("us-west-1".parse(), Ok(Region::UsWest1)); + assert_eq!("us-west-2".parse(), Ok(Region::UsWest2)); + assert_eq!("us-gov-east-1".parse(), Ok(Region::UsGovEast1)); + assert_eq!("us-gov-west-1".parse(), Ok(Region::UsGovWest1)); + assert_eq!("cn-north-1".parse(), Ok(Region::CnNorth1)); + assert_eq!("cn-northwest-1".parse(), Ok(Region::CnNorthwest1)); + } + + #[test] + fn region_serialize_deserialize() { + assert_tokens(&Region::ApEast1, &tokens_for_region("ap-east-1")); + assert_tokens(&Region::ApNortheast1, &tokens_for_region("ap-northeast-1")); + assert_tokens(&Region::ApNortheast2, &tokens_for_region("ap-northeast-2")); + assert_tokens(&Region::ApNortheast3, &tokens_for_region("ap-northeast-3")); + assert_tokens(&Region::ApSouth1, &tokens_for_region("ap-south-1")); + assert_tokens(&Region::ApSoutheast1, &tokens_for_region("ap-southeast-1")); + assert_tokens(&Region::ApSoutheast2, &tokens_for_region("ap-southeast-2")); + assert_tokens(&Region::CaCentral1, &tokens_for_region("ca-central-1")); + assert_tokens(&Region::EuCentral1, &tokens_for_region("eu-central-1")); + assert_tokens(&Region::EuWest1, &tokens_for_region("eu-west-1")); + assert_tokens(&Region::EuWest2, &tokens_for_region("eu-west-2")); + assert_tokens(&Region::EuWest3, &tokens_for_region("eu-west-3")); + assert_tokens(&Region::EuNorth1, &tokens_for_region("eu-north-1")); + assert_tokens(&Region::MeSouth1, &tokens_for_region("me-south-1")); + assert_tokens(&Region::SaEast1, &tokens_for_region("sa-east-1")); + assert_tokens(&Region::UsEast1, &tokens_for_region("us-east-1")); + assert_tokens(&Region::UsEast2, &tokens_for_region("us-east-2")); + assert_tokens(&Region::UsWest1, &tokens_for_region("us-west-1")); + assert_tokens(&Region::UsWest2, &tokens_for_region("us-west-2")); + assert_tokens(&Region::UsGovEast1, &tokens_for_region("us-gov-east-1")); + assert_tokens(&Region::UsGovWest1, &tokens_for_region("us-gov-west-1")); + assert_tokens(&Region::CnNorth1, &tokens_for_region("cn-north-1")); + assert_tokens(&Region::CnNorthwest1, &tokens_for_region("cn-northwest-1")) + } + + fn tokens_for_region(name: &'static str) -> [Token; 4] { + [ + Token::Tuple { len: 2 }, + Token::String(name), + Token::None, + Token::TupleEnd, + ] + } + + #[test] + fn region_serialize_deserialize_custom() { + let custom_region = Region::Custom { + endpoint: "http://localhost:8000".to_owned(), + name: "eu-east-1".to_owned(), + }; + assert_tokens( + &custom_region, + &[ + Token::Tuple { len: 2 }, + Token::String("eu-east-1"), + Token::Some, + Token::String("http://localhost:8000"), + Token::TupleEnd, + ], + ); + let expected = "[\"eu-east-1\",\"http://localhost:8000\"]"; + let region_deserialized = serde_json::to_string(&custom_region).unwrap(); + assert_eq!(region_deserialized, expected); + + let from_json = 
serde_json::de::from_str(&region_deserialized).unwrap(); + assert_eq!(custom_region, from_json); + } + + #[test] + fn region_serialize_deserialize_standard() { + let r = Region::UsWest2; + let region_deserialized = serde_json::to_string(&r).unwrap(); + let expected = "[\"us-west-2\",null]"; + + assert_eq!(region_deserialized, expected); + + let from_json = serde_json::de::from_str(&region_deserialized).unwrap(); + assert_eq!(r, from_json); + } + + #[test] + fn region_serialize_deserialize_standard_only_region_name() { + let r = Region::UsWest2; + let only_region_name = "[\"us-west-2\"]"; + let from_json = serde_json::de::from_str(&only_region_name).unwrap(); + assert_eq!(r, from_json); + } +} diff --git a/rusoto/signature/src/signature.rs b/rusoto/signature/src/signature.rs new file mode 100644 index 00000000000..4abd240b8a9 --- /dev/null +++ b/rusoto/signature/src/signature.rs @@ -0,0 +1,1118 @@ +//! AWS API request signatures. +//! +//! Follows [AWS Signature 4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +//! algorithm. +//! +//! If needed, the request will be re-issued to a temporary redirect endpoint. This can happen with +//! newly created S3 buckets not in us-standard/us-east-1. +//! +//! Please note that this module does not expect URIs to already be encoded. +//! + +use std::borrow::Cow; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::convert::TryInto; +use std::fmt; +use std::str; +use std::time::Duration; + +use base64; +use bytes::Bytes; +use hex; +use hmac::{Hmac, Mac}; +use http::header::{HeaderMap, HeaderName, HeaderValue}; +use http::{HttpTryFrom, Method, Request}; +use hyper::Body; +use log::{debug, log_enabled, Level::Debug}; +use md5; +use percent_encoding::{percent_decode, utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC}; +use sha2::{Digest, Sha256}; +use time::now_utc; +use time::Tm; + +use crate::credential::AwsCredentials; +use crate::region::Region; +use crate::stream::ByteStream; + +pub type Params = BTreeMap<String, Option<String>>; + +/// Payload string to use for unsigned payload +pub static UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD"; +/// Payload string to use for signed empty payload +pub static EMPTY_SHA256_HASH: &str = + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + +/// Possible payloads included in a `SignedRequest`.
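+///
+/// A small usage sketch (illustrative only; the `rusoto_signature` path is
+/// assumed from this crate's exports):
+///
+/// ```rust,ignore
+/// use bytes::Bytes;
+/// use rusoto_signature::SignedRequestPayload;
+///
+/// // Buffered payloads get a SHA-256 digest at signing time; streamed
+/// // payloads are sent with the UNSIGNED-PAYLOAD marker instead.
+/// let payload = SignedRequestPayload::Buffer(Bytes::from("hello"));
+/// ```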
+pub enum SignedRequestPayload { + /// Transfer payload in a single chunk + Buffer(Bytes), + /// Transfer payload in multiple chunks + Stream(ByteStream), +} + +impl fmt::Debug for SignedRequestPayload { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + SignedRequestPayload::Buffer(ref buf) => { + write!(f, "SignedRequestPayload::Buffer(len = {})", buf.len()) + } + SignedRequestPayload::Stream(ref stream) => write!( + f, + "SignedRequestPayload::Stream(size_hint = {:?})", + stream.size_hint() + ), + } + } +} + +/// A data structure for all the elements of an HTTP request that are involved in +/// the Amazon Signature Version 4 signing process +#[derive(Debug)] +pub struct SignedRequest { + /// The HTTP Method + pub method: String, + /// The AWS Service + pub service: String, + /// The AWS Region + pub region: Region, + /// The HTTP request path + pub path: String, + /// The HTTP Request Headers + pub headers: BTreeMap<String, Vec<Vec<u8>>>, + /// The HTTP request parameters + pub params: Params, + /// The HTTP/HTTPS protocol + pub scheme: Option<String>, + /// The AWS hostname + pub hostname: Option<String>, + /// The HTTP Content + pub payload: Option<SignedRequestPayload>, + /// The Standardised query string + pub canonical_query_string: String, + /// The Standardised URI + pub canonical_uri: String, +} + +impl SignedRequest { + /// Default constructor + pub fn new(method: &str, service: &str, region: &Region, path: &str) -> SignedRequest { + SignedRequest { + method: method.to_string(), + service: service.to_string(), + region: region.clone(), + path: path.to_string(), + headers: BTreeMap::new(), + params: Params::new(), + scheme: None, + hostname: None, + payload: None, + canonical_query_string: String::new(), + canonical_uri: String::new(), + } + } + + /// Sets the value of the "content-type" header. + pub fn set_content_type(&mut self, content_type: String) { + self.add_header("content-type", &content_type); + } + + /// Sets the target hostname + pub fn set_hostname(&mut self, hostname: Option<String>) { + self.hostname = hostname; + } + + /// Sets the target hostname using the current service type and region + /// + /// See the implementation of build_hostname to see how this is done + pub fn set_endpoint_prefix(&mut self, endpoint_prefix: String) { + self.hostname = Some(build_hostname(&endpoint_prefix, &self.region)); + } + + /// Sets the new body (payload) + pub fn set_payload<B: Into<Bytes>>(&mut self, payload: Option<B>) { + self.payload = payload.map(|chunk| SignedRequestPayload::Buffer(chunk.into())); + } + + /// Sets the new body (payload) as a stream + pub fn set_payload_stream(&mut self, stream: ByteStream) { + self.payload = Some(SignedRequestPayload::Stream(stream)); + } + + /// Computes and sets the Content-MD5 header based on the current payload. + /// + /// Has no effect if the payload is not set, or is not a buffer.
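+ ///
+ /// A hedged sketch of typical use (bucket and key are placeholders):
+ ///
+ /// ```rust,ignore
+ /// let mut request = SignedRequest::new("PUT", "s3", &Region::UsEast1, "/bucket/key");
+ /// request.set_payload(Some(b"hello".to_vec()));
+ /// request.set_content_md5_header(); // adds a base64-encoded MD5 of the buffer
+ /// ```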
+ pub fn set_content_md5_header(&mut self) { + let digest; + if let Some(SignedRequestPayload::Buffer(ref payload)) = self.payload { + digest = Some(md5::compute(payload)); + } else { + digest = None; + } + if let Some(digest) = digest { + // need to deref digest and then pass that reference: + self.add_header("Content-MD5", &base64::encode(&(*digest))); + } + } + + /// Returns the current HTTP method + pub fn method(&self) -> &str { + &self.method + } + + /// Returns the current path + pub fn path(&self) -> &str { + &self.path + } + + /// Invokes `canonical_uri(path)` to return a canonical path + pub fn canonical_path(&self) -> String { + canonical_uri(&self.path, &self.region) + } + + /// Returns the current canonical URI + pub fn canonical_uri(&self) -> &str { + &self.canonical_uri + } + + /// Returns the current query string + /// + /// Converts a parameter such as "example param": "examplekey" into "&example+param=examplekey" + pub fn canonical_query_string(&self) -> &str { + &self.canonical_query_string + } + + /// Returns the current headers + pub fn headers(&self) -> &BTreeMap<String, Vec<Vec<u8>>> { + &self.headers + } + + /// Returns the current http scheme (https or http) + pub fn scheme(&self) -> String { + match self.scheme { + Some(ref p) => p.to_string(), + None => match self.region { + Region::Custom { ref endpoint, .. } => { + if endpoint.starts_with("http://") { + "http".to_owned() + } else { + "https".to_owned() + } + } + _ => "https".to_owned(), + }, + } + } + + /// Converts hostname to String if it exists, else it invokes build_hostname() + pub fn hostname(&self) -> String { + // hostname may be already set by an endpoint prefix + match self.hostname { + Some(ref h) => h.to_string(), + None => build_hostname(&self.service, &self.region), + } + } + + /// If the key exists in headers, remove it: + pub fn remove_header(&mut self, key: &str) { + let key_lower = key.to_ascii_lowercase().to_string(); + self.headers.remove(&key_lower); + } + + /// Add a value to the array of headers for the specified key. + /// Headers are kept sorted by key name for use at signing (BTreeMap) + pub fn add_header<K: ToString>(&mut self, key: K, value: &str) { + let key_lower = key.to_string().to_ascii_lowercase(); + let value_vec = value.as_bytes().to_vec(); + + match self.headers.entry(key_lower) { + Entry::Vacant(entry) => { + let mut values = Vec::new(); + values.push(value_vec); + entry.insert(values); + } + Entry::Occupied(entry) => { + entry.into_mut().push(value_vec); + } + } + } + + /// Adds parameter to the HTTP Request + pub fn add_param<S>(&mut self, key: S, value: S) + where + S: Into<String>, + { + self.params.insert(key.into(), Some(value.into())); + } + + /// Sets parameters with a given variable of `Params` type + pub fn set_params(&mut self, params: Params) { + self.params = params; + } + + /// Generate a Presigned URL for AWS + /// + /// See the [documentation](https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html) + /// for more information.
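+ ///
+ /// A sketch (credentials, bucket, and key are placeholders) of presigning a
+ /// GET that expires in 15 minutes:
+ ///
+ /// ```rust,ignore
+ /// use std::time::Duration;
+ ///
+ /// let mut request = SignedRequest::new("GET", "s3", &Region::UsEast1, "/bucket/key");
+ /// let url = request.generate_presigned_url(&credentials, &Duration::from_secs(900), false);
+ /// ```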
+ pub fn generate_presigned_url( + &mut self, + creds: &AwsCredentials, + expires_in: &Duration, + should_sha256_sign_payload: bool, + ) -> String { + debug!("Presigning request URL"); + + self.sign(creds); + let hostname = self.hostname(); + + let current_time = now_utc(); + let current_time_fmted = current_time.strftime("%Y%m%dT%H%M%SZ").unwrap(); + let current_time_fmted = format!("{}", &current_time_fmted); + let current_date = current_time.strftime("%Y%m%d").unwrap(); + + self.remove_header("X-Amz-Content-Sha256"); + + self.remove_header("X-Amz-Date"); + + self.remove_header("Content-Type"); + + if let Some(ref token) = *creds.token() { + self.remove_header("X-Amz-Security-Token"); + self.params.insert( + "X-Amz-Security-Token".into(), + encode_uri_strict(token).into(), + ); + } + + self.remove_header("X-Amz-Algorithm"); + self.params + .insert("X-Amz-Algorithm".into(), Some("AWS4-HMAC-SHA256".into())); + + self.remove_header("X-Amz-Credential"); + self.params.insert( + "X-Amz-Credential".into(), + format!( + "{}/{}/{}/{}/aws4_request", + &creds.aws_access_key_id(), + &current_date, + self.region.name(), + self.service + ) + .into(), + ); + + self.remove_header("X-Amz-Expires"); + let expiration_time = format!("{}", expires_in.as_secs()); + self.params + .insert("X-Amz-Expires".into(), expiration_time.into()); + + self.canonical_uri = canonical_uri(&self.path, &self.region); + let canonical_headers = canonical_headers(&self.headers); + + let signed_headers = signed_headers(&self.headers); + self.params + .insert("X-Amz-SignedHeaders".into(), Some(signed_headers.clone())); + + self.params + .insert("X-Amz-Date".into(), current_time_fmted.into()); + + self.canonical_query_string = build_canonical_query_string(&self.params); + + debug!("canonical_uri: {:?}", self.canonical_uri); + debug!("canonical_headers: {:?}", canonical_headers); + debug!("signed_headers: {:?}", signed_headers); + debug!("canonical_query_string: {:?}", self.canonical_query_string); + + let payload = if should_sha256_sign_payload { + match self.payload { + None => Cow::Borrowed(EMPTY_SHA256_HASH), + Some(SignedRequestPayload::Buffer(ref payload)) => { + let (digest, _len) = digest_payload(&payload); + Cow::Owned(digest) + } + Some(SignedRequestPayload::Stream(ref _stream)) => Cow::Borrowed(UNSIGNED_PAYLOAD), + } + } else { + Cow::Borrowed(UNSIGNED_PAYLOAD) + }; + + let canonical_request = format!( + "{}\n{}\n{}\n{}\n{}\n{}", + &self.method, + self.canonical_uri, + self.canonical_query_string, + canonical_headers, + &signed_headers, + payload + ); + + debug!("canonical_request: {:?}", canonical_request); + + // use the hashed canonical request to build the string to sign + let hashed_canonical_request = to_hexdigest(&canonical_request); + + debug!("hashed_canonical_request: {:?}", hashed_canonical_request); + + let scope = format!( + "{}/{}/{}/aws4_request", + current_date, + self.region.name(), + &self.service + ); + + debug!("scope: {}", scope); + + let string_to_sign = string_to_sign(current_time, &hashed_canonical_request, &scope); + + debug!("string_to_sign: {}", string_to_sign); + + let signature = sign_string( + &string_to_sign, + creds.aws_secret_access_key(), + current_time, + &self.region.name(), + &self.service, + ); + self.params + .insert("X-Amz-Signature".into(), signature.into()); + + format!( + "{}://{}{}?{}", + self.scheme(), + hostname, + self.canonical_uri, + build_canonical_query_string(&self.params) + ) + } + + /// Signs the request using Amazon Signature version 4 to verify identity.
+ /// Authorization header uses AWS4-HMAC-SHA256 for signing. + pub fn sign(&mut self, creds: &AwsCredentials) { + self.sign_with_plus(creds, false) + } + + /// Complement SignedRequest by ensuring the following HTTP headers are set accordingly: + /// - host + /// - content-type + /// - content-length (if applicable) + pub fn complement(&mut self) { + self.complement_with_plus(false) + } + + /// Complement SignedRequest by ensuring the following HTTP headers are set accordingly: + /// - host + /// - content-type + /// - content-length (if applicable) + pub fn complement_with_plus(&mut self, should_treat_plus_literally: bool) { + // build the canonical request + self.canonical_uri = self.canonical_path(); + self.canonical_query_string = + build_canonical_query_string_with_plus(&self.params, should_treat_plus_literally); + // Gotta remove and re-add headers since by default they append the value. If we're following + // a 307 redirect we end up with Three Stooges in the headers with duplicate values. + self.remove_header("host"); + self.add_header("host", &self.hostname()); + // if there's no content-type header set, set it to the default value + if let Entry::Vacant(entry) = self.headers.entry("content-type".to_owned()) { + let mut values = Vec::new(); + values.push(b"application/octet-stream".to_vec()); + entry.insert(values); + } + let len = match self.payload { + None => Some(0), + Some(SignedRequestPayload::Buffer(ref payload)) => Some(payload.len()), + Some(SignedRequestPayload::Stream(ref stream)) => stream.size_hint(), + }; + if let Some(len) = len { + self.remove_header("content-length"); + self.add_header("content-length", &format!("{}", len)); + } + } + + /// Signs the request using Amazon Signature version 4 to verify identity. + /// Authorization header uses AWS4-HMAC-SHA256 for signing. + pub fn sign_with_plus(&mut self, creds: &AwsCredentials, should_treat_plus_literally: bool) { + self.complement_with_plus(should_treat_plus_literally); + let date = now_utc(); + self.remove_header("x-amz-date"); + self.add_header( + "x-amz-date", + &date.strftime("%Y%m%dT%H%M%SZ").unwrap().to_string(), + ); + + if let Some(ref token) = *creds.token() { + self.remove_header("X-Amz-Security-Token"); + self.add_header("X-Amz-Security-Token", token); + } + + let digest = match self.payload { + None => Cow::Borrowed(EMPTY_SHA256_HASH), + Some(SignedRequestPayload::Buffer(ref payload)) => { + let (digest, _) = digest_payload(&payload); + Cow::Owned(digest) + } + Some(SignedRequestPayload::Stream(_)) => Cow::Borrowed(UNSIGNED_PAYLOAD), + }; + self.remove_header("x-amz-content-sha256"); + self.add_header("x-amz-content-sha256", &digest); + + let signed_headers = signed_headers(&self.headers); + + let canonical_headers = canonical_headers(&self.headers); + + // Normalize URI paths according to RFC 3986. Remove redundant and relative path components. Each path segment must be URI-encoded twice (except for Amazon S3 which only gets URI-encoded once). 
+ // see https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + let canonical_uri = if &self.service != "s3" { + utf8_percent_encode(&self.canonical_uri, &STRICT_PATH_ENCODE_SET).collect::<String>() + } else { + self.canonical_uri.clone() + }; + + let canonical_request = format!( + "{}\n{}\n{}\n{}\n{}\n{}", + &self.method, + canonical_uri, + self.canonical_query_string, + canonical_headers, + signed_headers, + digest + ); + + // use the hashed canonical request to build the string to sign + let hashed_canonical_request = to_hexdigest(&canonical_request); + let scope = format!( + "{}/{}/{}/aws4_request", + date.strftime("%Y%m%d").unwrap(), + self.region.name(), + &self.service + ); + let string_to_sign = string_to_sign(date, &hashed_canonical_request, &scope); + + // sign the string + let signature = sign_string( + &string_to_sign, + creds.aws_secret_access_key(), + date, + &self.region.name(), + &self.service, + ); + + // build the actual auth header + let auth_header = format!( + "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}", + &creds.aws_access_key_id(), + scope, + signed_headers, + signature + ); + self.remove_header("authorization"); + self.add_header("authorization", &auth_header); + } +} + +impl TryInto<Request<Body>> for SignedRequest { + type Error = http::Error; + + fn try_into(self) -> Result<Request<Body>, Self::Error> { + let method = Method::try_from(self.method.as_str())?; + + let headers = self + .headers() + .iter() + .try_fold::<_, _, Result<_, Self::Error>>(HeaderMap::new(), |mut headers, (k, v)| { + let name = HeaderName::from_bytes(k.as_bytes())?; + for v in v.iter() { + let value = HeaderValue::from_bytes(v)?; + headers.append(&name, value); + } + Ok(headers) + })?; + + let mut final_uri = format!( + "{}://{}{}", + self.scheme(), + self.hostname(), + self.canonical_path() + ); + if !self.canonical_query_string().is_empty() { + final_uri = final_uri + &format!("?{}", self.canonical_query_string()); + } + + if log_enabled!(Debug) { + let payload = match self.payload { + Some(SignedRequestPayload::Buffer(ref payload_bytes)) => { + String::from_utf8(payload_bytes.as_ref().to_owned()) + .unwrap_or_else(|_| String::from("")) + } + Some(SignedRequestPayload::Stream(ref stream)) => { + format!("<stream size_hint={:?}>", stream.size_hint()) + } + None => "".to_owned(), + }; + + debug!( + "Full request: \n method: {}\n final_uri: {}\n payload: {}\nHeaders:\n", + method, final_uri, payload + ); + for (h, v) in headers.iter() { + debug!("{}:{:?}", h.as_str(), v); + } + } + + let mut builder = Request::builder(); + builder.method(method); + builder.uri(final_uri); + + let body = if let Some(payload) = self.payload { + match payload { + SignedRequestPayload::Buffer(bytes) => Body::from(bytes), + SignedRequestPayload::Stream(stream) => Body::wrap_stream(stream), + } + } else { + Body::empty() + }; + let mut request = builder.body(body)?; + + *request.headers_mut() = headers; + Ok(request) + } +} + +/// Convert payload from Char array to usable format. +fn digest_payload(payload: &[u8]) -> (String, usize) { + let digest = to_hexdigest(payload); + let len = payload.len(); + (digest, len) +} + +#[inline] +fn hmac(secret: &[u8], message: &[u8]) -> Hmac<Sha256> { + let mut hmac = Hmac::<Sha256>::new_varkey(secret).expect("failed to create hmac"); + hmac.input(message); + hmac +} + +/// Takes a message and signs it using AWS secret, time, region keys and service keys.
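+///
+/// The signing key is derived by chaining HMAC-SHA256 over the credential
+/// scope, exactly as the body below does:
+/// `kDate = HMAC("AWS4" + secret, date)`, `kRegion = HMAC(kDate, region)`,
+/// `kService = HMAC(kRegion, service)`, `kSigning = HMAC(kService, "aws4_request")`;
+/// the returned value is `hex(HMAC(kSigning, string_to_sign))`.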
+/// Takes a message and signs it using AWS secret, time, region keys and service keys.
+fn sign_string(
+    string_to_sign: &str,
+    secret: &str,
+    date: Tm,
+    region: &str,
+    service: &str,
+) -> String {
+    let date_str = date.strftime("%Y%m%d").unwrap().to_string();
+    let date_hmac = hmac(format!("AWS4{}", secret).as_bytes(), date_str.as_bytes())
+        .result()
+        .code();
+    let region_hmac = hmac(date_hmac.as_ref(), region.as_bytes()).result().code();
+    let service_hmac = hmac(region_hmac.as_ref(), service.as_bytes())
+        .result()
+        .code();
+    let signing_hmac = hmac(service_hmac.as_ref(), b"aws4_request").result().code();
+    hex::encode(
+        hmac(signing_hmac.as_ref(), string_to_sign.as_bytes())
+            .result()
+            .code()
+            .as_ref(),
+    )
+}
+
+/// Mark string as AWS4-HMAC-SHA256 hashed
+pub fn string_to_sign(date: Tm, hashed_canonical_request: &str, scope: &str) -> String {
+    format!(
+        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
+        date.strftime("%Y%m%dT%H%M%SZ").unwrap(),
+        scope,
+        hashed_canonical_request
+    )
+}
+
+fn signed_headers(headers: &BTreeMap<String, Vec<Vec<u8>>>) -> String {
+    let mut signed = String::new();
+    headers
+        .iter()
+        .filter(|&(ref key, _)| !skipped_headers(&key))
+        .for_each(|(key, _)| {
+            if !signed.is_empty() {
+                signed.push(';');
+            }
+            signed.push_str(key);
+        });
+    signed
+}
+
+/// Canonicalizes headers into the AWS Canonical Form.
+///
+/// Read more about it: [HERE](http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html)
+fn canonical_headers(headers: &BTreeMap<String, Vec<Vec<u8>>>) -> String {
+    let mut canonical = String::new();
+
+    for (key, value) in headers.iter() {
+        if skipped_headers(key) {
+            continue;
+        }
+        canonical.push_str(format!("{}:{}\n", key, canonical_values(value)).as_ref());
+    }
+    canonical
+}
+
+/// Canonicalizes values into the AWS Canonical Form.
+///
+/// Read more about it: [HERE](http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html)
+fn canonical_values(values: &[Vec<u8>]) -> String {
+    let mut st = String::new();
+    for v in values {
+        let s = str::from_utf8(v).unwrap();
+        if !st.is_empty() {
+            st.push(',')
+        }
+        if s.starts_with('\"') {
+            st.push_str(s);
+        } else {
+            // note: this only collapses pairs of spaces, not arbitrarily long runs
+            st.push_str(s.replace("  ", " ").trim());
+        }
+    }
+    st
+}
+
+fn skipped_headers(header: &str) -> bool {
+    ["authorization", "content-length", "user-agent"].contains(&header)
+}
+
+/// Returns standardised URI
+fn canonical_uri(path: &str, region: &Region) -> String {
+    let endpoint_path = match region {
+        Region::Custom { ref endpoint, .. } => extract_endpoint_path(endpoint),
+        _ => None,
+    };
+    match (endpoint_path, path) {
+        (Some(prefix), "") => prefix.to_string(),
+        (None, "") => "/".to_string(),
+        (Some(prefix), _) => encode_uri_path(&(prefix.to_owned() + path)),
+        _ => encode_uri_path(path),
+    }
+}
+
+/// Canonicalizes the query string while iterating through the given parameters.
+///
+/// Read more about it: [HERE](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html#query-string-auth-v4-signing)
+fn build_canonical_query_string(params: &Params) -> String {
+    build_canonical_query_string_with_plus(params, false)
+}
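To make the '+' handling concrete, a small illustration (editorial sketch, not part of the patch; the key and value are hypothetical, and `Params` is this crate's parameter map):

    let mut params = Params::new();
    params.insert("q".to_owned(), Some("a+b".to_owned()));
    // By default '+' is treated as an encoded space and canonicalized to %20:
    assert_eq!(build_canonical_query_string(&params), "q=a%20b");
    // With should_treat_plus_literally = true it is kept and percent-encoded as %2B:
    assert_eq!(build_canonical_query_string_with_plus(&params, true), "q=a%2Bb");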
+/// Canonicalizes the query string while iterating through the given parameters.
+///
+/// Read more about it: [HERE](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html#query-string-auth-v4-signing)
+fn build_canonical_query_string_with_plus(
+    params: &Params,
+    should_treat_plus_literally: bool,
+) -> String {
+    if params.is_empty() {
+        return String::new();
+    }
+
+    let mut output = String::new();
+    for (key, val) in params.iter() {
+        if !output.is_empty() {
+            output.push_str("&");
+        }
+        if should_treat_plus_literally {
+            output.push_str(&encode_uri_strict(&key));
+        } else {
+            output.push_str(&encode_uri_strict(&key.replace("+", " ")));
+        }
+        output.push_str("=");
+
+        if let Some(ref unwrapped_val) = *val {
+            if should_treat_plus_literally {
+                output.push_str(&encode_uri_strict(&unwrapped_val));
+            } else {
+                output.push_str(&encode_uri_strict(&unwrapped_val.replace("+", " ")));
+            }
+        }
+    }
+
+    output
+}
+
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// Do not URI-encode any of the unreserved characters that RFC 3986 defines:
+// A-Z, a-z, 0-9, hyphen ( - ), underscore ( _ ), period ( . ), and tilde ( ~ ).
+//
+// Percent-encode all other characters with %XY, where X and Y are hexadecimal
+// characters (0-9 and uppercase A-F). For example, the space character must be
+// encoded as %20 (not using '+', as some encoding schemes do) and extended UTF-8
+// characters must be in the form %XY%ZA%BC
+/// This constant is used to maintain the strict URI encoding standard as proposed by RFC 3986
+pub const STRICT_ENCODE_SET: AsciiSet = NON_ALPHANUMERIC
+    .remove(b'-')
+    .remove(b'.')
+    .remove(b'_')
+    .remove(b'~');
+
+/// This constant is used to maintain the URI path encoding; it additionally preserves '/'
+pub const STRICT_PATH_ENCODE_SET: AsciiSet = STRICT_ENCODE_SET.remove(b'/');
+
+#[inline]
+#[doc(hidden)]
+pub fn encode_uri_path(uri: &str) -> String {
+    utf8_percent_encode(uri, &STRICT_PATH_ENCODE_SET).collect::<String>()
+}
+
+#[inline]
+fn encode_uri_strict(uri: &str) -> String {
+    utf8_percent_encode(uri, &STRICT_ENCODE_SET).collect::<String>()
+}
+
+#[inline]
+#[doc(hidden)]
+pub fn decode_uri(uri: &str) -> String {
+    let decoder = percent_decode(uri.as_bytes());
+    if let Ok(decoded) = decoder.decode_utf8() {
+        decoded.to_string()
+    } else {
+        uri.to_owned()
+    }
+}
+
+fn to_hexdigest<T: AsRef<[u8]>>(t: T) -> String {
+    let h = Sha256::digest(t.as_ref());
+    hex::encode(h.as_ref())
+}
+
+fn extract_endpoint_path(endpoint: &str) -> Option<&str> {
+    extract_endpoint_components(endpoint).1
+}
+
+fn extract_endpoint_components(endpoint: &str) -> (&str, Option<&str>) {
+    let unschemed = endpoint
+        .find("://")
+        .map(|p| &endpoint[p + 3..])
+        .unwrap_or(endpoint);
+    unschemed
+        .find('/')
+        .map(|p| (&unschemed[..p], Some(&unschemed[p..])))
+        .unwrap_or((unschemed, None))
+}
+
+fn extract_hostname(endpoint: &str) -> &str {
+    extract_endpoint_components(endpoint).0
+}
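A quick illustrative check (editorial sketch, not part of the patch; the input strings are hypothetical) of the two encode sets defined above — they differ only in whether '/' survives:

    assert_eq!(encode_uri_strict("a/b c"), "a%2Fb%20c"); // '/' is percent-encoded
    assert_eq!(encode_uri_path("a/b c"), "a/b%20c");     // '/' is preserved in paths
    assert_eq!(decode_uri("a%2Fb%20c"), "a/b c");        // decoding round-trips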
+/// Takes a `Region` enum and a service and forms a valid DNS name.
+/// E.g. `Region::ApNortheast1` and `s3` produces `s3-ap-northeast-1.amazonaws.com`.
+fn build_hostname(service: &str, region: &Region) -> String {
+    //iam & cloudfront have only 1 endpoint, other services have region-based endpoints
+    match service {
+        "iam" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            Region::CnNorth1 | Region::CnNorthwest1 => {
+                format!("{}.{}.amazonaws.com.cn", service, region.name())
+            }
+            _ => format!("{}.amazonaws.com", service),
+        },
+        "chime" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            _ => format!("service.{}.aws.amazon.com", service),
+        },
+        "cloudfront" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            _ => format!("{}.amazonaws.com", service),
+        },
+        "importexport" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            _ => "importexport.amazonaws.com".to_owned(),
+        },
+        "s3" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            Region::UsEast1 => "s3.amazonaws.com".to_string(),
+            Region::CnNorth1 | Region::CnNorthwest1 => {
+                format!("s3.{}.amazonaws.com.cn", region.name())
+            }
+            _ => format!("s3-{}.amazonaws.com", region.name()),
+        },
+        "route53" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            _ => "route53.amazonaws.com".to_owned(),
+        },
+        "sdb" => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            Region::UsEast1 => "sdb.amazonaws.com".to_string(),
+            _ => format!("sdb.{}.amazonaws.com", region.name()),
+        },
+        _ => match *region {
+            Region::Custom { ref endpoint, .. } => extract_hostname(endpoint).to_owned(),
+            Region::CnNorth1 | Region::CnNorthwest1 => {
+                format!("{}.{}.amazonaws.com.cn", service, region.name())
+            }
+            _ => format!("{}.{}.amazonaws.com", service, region.name()),
+        },
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use time::empty_tm;
+
+    #[test]
+    fn get_hostname_none_present() {
+        let request = SignedRequest::new("POST", "sqs", &Region::UsEast1, "/");
+        assert_eq!("sqs.us-east-1.amazonaws.com", request.hostname());
+    }
+
+    #[test]
+    fn convert_request() {
+        use http::{Method, Uri, Version};
+        let mut request = SignedRequest::new("POST", "sqs", &Region::UsEast1, "/");
+        request.sign(&AwsCredentials::new(
+            "foo_access_key",
+            "foo_secret_key",
+            None,
+            None,
+        ));
+
+        let req: http::Request<Body> = request.try_into().unwrap();
+        let expected_uri = Uri::from_static("https://sqs.us-east-1.amazonaws.com");
+        assert_eq!(req.method(), Method::POST);
+        assert_eq!(req.uri(), &expected_uri);
+        assert_eq!(req.version(), Version::HTTP_11);
+    }
+
+    #[test]
+    fn path_percent_encoded() {
+        let mut request = SignedRequest::new(
+            "GET",
+            "s3",
+            &Region::UsEast1,
+            "/path with spaces: the sequel",
+        );
+        request.sign(&AwsCredentials::new(
+            "foo_access_key",
+            "foo_secret_key",
+            None,
+            None,
+        ));
+        assert_eq!(
+            "/path%20with%20spaces%3A%20the%20sequel",
+            request.canonical_uri()
+        );
+    }
+    #[test]
+    fn query_encoding_escaped_chars() {
+        query_encoding_escaped_chars_range(0u8, 45u8); // \0 to '-'
+        query_encoding_escaped_chars_range(47u8, 48u8); // '/' to '0'
+        query_encoding_escaped_chars_range(58u8, 65u8); // ':' to 'A'
+        query_encoding_escaped_chars_range(91u8, 95u8); // '[' to '_'
+        query_encoding_escaped_chars_range(96u8, 97u8); // '`' to 'a'
+        query_encoding_escaped_chars_range(123u8, 126u8); // '{' to '~'
+        query_encoding_escaped_chars_range(127u8, 128u8); // DEL
+    }
+    fn query_encoding_escaped_chars_range(start: u8, end: u8) {
+        let mut params = Params::new();
+        for code in start..end {
+            params.insert("k".to_owned(), Some((code as char).to_string()));
+            let enc = build_canonical_query_string(&params);
+            let expected = if (code as char) == '+' {
+                "k=%20".to_owned()
+            } else {
+                format!("k=%{:02X}", code)
+            };
+            assert_eq!(expected, enc);
+        }
+    }
+    #[test]
+    fn query_string_encoding_outliers() {
+        let mut request = SignedRequest::new(
+            "GET",
+            "s3",
+
&Region::UsEast1, + "/pathwith%20already%20existing%20encoding and some not encoded values", + ); + request.add_param("arg1%7B", "arg1%7B"); + request.add_param("arg2%7B+%2B", "+%2B"); + assert_eq!( + super::build_canonical_query_string(&request.params), + "arg1%257B=arg1%257B&arg2%257B%20%252B=%20%252B" + ); + assert_eq!( + super::canonical_uri(&request.path, &Region::default()), + "/pathwith%2520already%2520existing%2520encoding%20and%20some%20not%20encoded%20values" + ); + } + #[test] + fn query_percent_encoded() { + let mut request = SignedRequest::new( + "GET", + "s3", + &Region::UsEast1, + "/path with spaces: the sequel++", + ); + request.add_param( + "key:with@funny&characters", + "value with/funny%characters/Рускии", + ); + let canonical_query_string = super::build_canonical_query_string(&request.params); + assert_eq!("key%3Awith%40funny%26characters=value%20with%2Ffunny%25characters%2F%D0%A0%D1%83%D1%81%D0%BA%D0%B8%D0%B8", + canonical_query_string); + let canonical_uri_string = super::canonical_uri(&request.path, &Region::default()); + assert_eq!( + "/path%20with%20spaces%3A%20the%20sequel%2B%2B", + canonical_uri_string + ); + } + + #[test] + fn signature_generation() { + let signature_foo = super::sign_string( + "foo", + "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + empty_tm(), + "us-west-1", + "s3", + ); + assert_eq!( + signature_foo, + "29673d1d856a7684ff6f0f53c542bae0bfbb1e564f531aff7568be9fd206383b".to_string() + ); + let signature_bar = super::sign_string( + "bar", + "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + empty_tm(), + "us-west-1", + "s3", + ); + assert_eq!( + signature_bar, + "2ba6879cd9e769d73df721dc90aafdaa843005d23f5b6c91d0744f804962e44f".to_string() + ); + } + + #[test] + fn signed_headers_unsigned_first() { + let mut headers = BTreeMap::new(); + + // This header is excluded from signing + headers.insert("content-length".to_owned(), vec![vec![]]); + + headers.insert("content-type".to_owned(), vec![vec![]]); + headers.insert("x-amz-date".to_owned(), vec![vec![]]); + assert_eq!(super::signed_headers(&headers), "content-type;x-amz-date"); + } + + #[test] + fn signed_headers_unsigned_in_center() { + let mut headers = BTreeMap::new(); + headers.insert("cache-control".to_owned(), vec![vec![]]); + + // This header is excluded from signing + headers.insert("content-length".to_owned(), vec![vec![]]); + + headers.insert("content-type".to_owned(), vec![vec![]]); + headers.insert("host".to_owned(), vec![vec![]]); + headers.insert("x-amz-date".to_owned(), vec![vec![]]); + + assert_eq!( + super::signed_headers(&headers), + "cache-control;content-type;host;x-amz-date" + ); + } + + #[test] + fn signed_headers_unsigned_last() { + let mut headers = BTreeMap::new(); + headers.insert("cache-control".to_owned(), vec![vec![]]); + + // This header is excluded from signing + headers.insert("content-length".to_owned(), vec![vec![]]); + + assert_eq!(super::signed_headers(&headers), "cache-control"); + } + + #[test] + fn canonical_uri_combos() { + assert_eq!(super::canonical_uri("", &Region::default()), "/"); + assert_eq!(super::canonical_uri("/foo", &Region::default()), "/foo"); + assert_eq!( + super::canonical_uri( + "", + &Region::Custom { + name: Region::UsEast1.name().into(), + endpoint: "http://localhost:8000/path".into() + } + ), + "/path" + ); + assert_eq!( + super::canonical_uri( + "/foo", + &Region::Custom { + name: Region::UsEast1.name().into(), + endpoint: "http://localhost:8000/path".into() + } + ), + "/path/foo" + ); + assert_eq!( + super::canonical_uri( + "/foo", + 
            &Region::Custom {
+                    name: Region::UsEast1.name().into(),
+                    endpoint: "http://localhost:8000".into()
+                }
+            ),
+            "/foo"
+        );
+    }
+
+    #[test]
+    fn extract_hostname() {
+        assert_eq!(
+            super::extract_hostname("hostname.with.no.scheme"),
+            "hostname.with.no.scheme"
+        );
+        assert_eq!(
+            super::extract_hostname("http://hostname.with.scheme"),
+            "hostname.with.scheme"
+        );
+        assert_eq!(
+            super::extract_hostname("https://hostname.with.scheme"),
+            "hostname.with.scheme"
+        );
+
+        assert_eq!(
+            super::extract_hostname("hostname.with.no.scheme/test"),
+            "hostname.with.no.scheme"
+        );
+        assert_eq!(
+            super::extract_hostname("http://hostname.with.scheme/test"),
+            "hostname.with.scheme"
+        );
+        assert_eq!(
+            super::extract_hostname("https://hostname.with.scheme/test"),
+            "hostname.with.scheme"
+        );
+    }
+
+    #[test]
+    fn x_amz_content_sha256_header_is_signed() {
+        // https://github.com/rusoto/rusoto/issues/1463
+
+        let mut request = SignedRequest::new("GET", "s3", &Region::UsEast1, "/path");
+        request.sign(&AwsCredentials::new(
+            "foo_access_key",
+            "foo_secret_key",
+            None,
+            None,
+        ));
+
+        let authorization_headers = request.headers.get("authorization").unwrap();
+        let authorization_header = authorization_headers[0].clone();
+        let authorization_header = String::from_utf8(authorization_header).unwrap();
+
+        // we want to check that "x-amz-content-sha256" header is signed
+        // and "authorization" header includes all signed headers
+        assert!(authorization_header.contains("x-amz-content-sha256"));
+    }
+}
diff --git a/rusoto/signature/src/stream.rs b/rusoto/signature/src/stream.rs
new file mode 100644
index 00000000000..ef6feb67a95
--- /dev/null
+++ b/rusoto/signature/src/stream.rs
@@ -0,0 +1,165 @@
+use std::fmt;
+use std::io;
+
+use bytes::Bytes;
+use futures::{future, stream, Async, Future, Poll, Stream};
+use tokio::io::AsyncRead;
+
+/// Stream of bytes.
+pub struct ByteStream {
+    size_hint: Option<usize>,
+    inner: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send + 'static>,
+}
+
+impl ByteStream {
+    /// Create a new `ByteStream` by wrapping a `futures` stream.
+    pub fn new<S>(stream: S) -> ByteStream
+    where
+        S: Stream<Item = Bytes, Error = io::Error> + Send + 'static,
+    {
+        ByteStream {
+            size_hint: None,
+            inner: Box::new(stream),
+        }
+    }
+
+    pub(crate) fn size_hint(&self) -> Option<usize> {
+        self.size_hint
+    }
+
+    /// Return an implementation of `AsyncRead` that uses async i/o to consume the stream.
+    pub fn into_async_read(self) -> impl AsyncRead + Send {
+        ImplAsyncRead::new(self.inner)
+    }
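A brief sketch (editorial, not part of the patch; the byte contents are hypothetical) contrasting the two ways of constructing a `ByteStream` — `From<Vec<u8>>` records the exact length, while wrapping an arbitrary futures 0.1 stream leaves it unknown, which is why the signer treats streaming payloads as UNSIGNED-PAYLOAD:

    let buffered = ByteStream::from(vec![1u8, 2, 3]);
    assert_eq!(buffered.size_hint(), Some(3)); // length known up front

    let chunks = vec![Bytes::from_static(b"ab"), Bytes::from_static(b"cd")];
    let streamed = ByteStream::new(stream::iter_ok::<_, io::Error>(chunks));
    assert_eq!(streamed.size_hint(), None); // length unknown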
+    /// Return an implementation of `Read` that uses blocking i/o to consume the stream.
+    pub fn into_blocking_read(self) -> impl io::Read + Send {
+        ImplBlockingRead::new(self.inner)
+    }
+}
+
+impl From<Vec<u8>> for ByteStream {
+    fn from(buf: Vec<u8>) -> ByteStream {
+        ByteStream {
+            size_hint: Some(buf.len()),
+            inner: Box::new(stream::once(Ok(Bytes::from(buf)))),
+        }
+    }
+}
+
+impl fmt::Debug for ByteStream {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "<ByteStream size_hint={:?}>", self.size_hint)
+    }
+}
+
+impl Stream for ByteStream {
+    type Item = Bytes;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<Bytes>, Self::Error> {
+        self.inner.poll()
+    }
+}
+
+struct ImplAsyncRead {
+    buffer: io::Cursor<Bytes>,
+    stream: stream::Fuse<Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>>,
+}
+
+impl ImplAsyncRead {
+    fn new(stream: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>) -> Self {
+        ImplAsyncRead {
+            buffer: io::Cursor::new(Bytes::new()),
+            stream: stream.fuse(),
+        }
+    }
+}
+
+impl io::Read for ImplAsyncRead {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        if buf.is_empty() {
+            return Ok(0);
+        }
+        loop {
+            let n = self.buffer.read(buf)?;
+            if n > 0 {
+                return Ok(n);
+            }
+            match self.stream.poll()? {
+                Async::NotReady => {
+                    return Err(io::ErrorKind::WouldBlock.into());
+                }
+                Async::Ready(Some(buffer)) => {
+                    self.buffer = io::Cursor::new(buffer);
+                    continue;
+                }
+                Async::Ready(None) => {
+                    return Ok(0);
+                }
+            }
+        }
+    }
+}
+
+impl AsyncRead for ImplAsyncRead {}
+
+struct ImplBlockingRead {
+    inner: ImplAsyncRead,
+}
+
+impl ImplBlockingRead {
+    fn new(stream: Box<dyn Stream<Item = Bytes, Error = io::Error> + Send>) -> Self {
+        ImplBlockingRead {
+            inner: ImplAsyncRead::new(stream),
+        }
+    }
+}
+
+impl io::Read for ImplBlockingRead {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        future::poll_fn(|| self.inner.poll_read(buf)).wait()
+    }
+}
+
+#[test]
+fn test_async_read() {
+    use bytes::Bytes;
+    use std::io::Read;
+
+    let chunks = vec![Bytes::from_static(b"1234"), Bytes::from_static(b"5678")];
+    let stream = ByteStream::new(stream::iter_ok(chunks));
+    let mut async_read = stream.into_async_read();
+
+    let mut buf = [0u8; 3];
+    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
+    assert_eq!(&buf[..3], b"123");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
+    assert_eq!(&buf[..1], b"4");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
+    assert_eq!(&buf[..3], b"567");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
+    assert_eq!(&buf[..1], b"8");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_blocking_read() {
+    use bytes::Bytes;
+    use std::io::Read;
+
+    let chunks = vec![Bytes::from_static(b"1234"), Bytes::from_static(b"5678")];
+    let stream = ByteStream::new(stream::iter_ok(chunks));
+    let mut async_read = stream.into_blocking_read();
+
+    let mut buf = [0u8; 3];
+    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
+    assert_eq!(&buf[..3], b"123");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
+    assert_eq!(&buf[..1], b"4");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 3);
+    assert_eq!(&buf[..3], b"567");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 1);
+    assert_eq!(&buf[..1], b"8");
+    assert_eq!(async_read.read(&mut buf).unwrap(), 0);
}
diff --git a/service_crategen/Cargo.toml b/service_crategen/Cargo.toml
index f065a1257ba..c88243ddc2f 100644
--- a/service_crategen/Cargo.toml
+++ b/service_crategen/Cargo.toml
@@ -13,8 +13,6 @@ version = "0.1.0"
 edition = "2018"
 
 [dependencies]
-Inflector = "0.7.0"
-clap = "2.33.0"
 hoedown = "6.0.0"
 lazy_static = "1.3.0"
 rayon = "1.0.3"
@@ -24,6 +22,14 @@ serde_json = "1.0.39"
 toml = "0.5.1"
 
+[dependencies.Inflector]
+version = "0.11.4"
+default-features = false
+
+[dependencies.clap]
+version = "2.33.0" +default-features = false + [dependencies.clippy] optional = true version = "0.0" diff --git a/service_crategen/botocore b/service_crategen/botocore index 29f598cd50b..b07f62c14ce 160000 --- a/service_crategen/botocore +++ b/service_crategen/botocore @@ -1 +1 @@ -Subproject commit 29f598cd50b3ff2d5ad514fc6352c90136e0772e +Subproject commit b07f62c14ce0b5a3403cba7c561477fd6a90312e diff --git a/service_crategen/services.json b/service_crategen/services.json index ad28aa44843..6c64d152a57 100644 --- a/service_crategen/services.json +++ b/service_crategen/services.json @@ -1,559 +1,571 @@ { "acm": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-12-08", "baseTypeName": "Acm" }, "acm-pca": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-08-22", "baseTypeName": "AcmPca" }, "alexaforbusiness": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-09", "baseTypeName": "AlexaForBusiness" }, "amplify": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-07-25", "baseTypeName": "Amplify" }, "apigateway": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-07-09", "baseTypeName": "ApiGateway" }, "apigatewaymanagementapi": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-11-29", "baseTypeName": "ApiGatewayManagementApi" }, "apigatewayv2": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-11-29", "baseTypeName": "ApiGatewayV2" }, "application-autoscaling": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-02-06", "baseTypeName": "ApplicationAutoScaling" }, + "appmesh": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2019-01-25", + "baseTypeName": "AppMesh" + }, "appstream": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-12-01", "baseTypeName": "AppStream" }, "appsync": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-07-25", "baseTypeName": "AppSync" }, "athena": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-05-18", "baseTypeName": "Athena" }, "autoscaling": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2011-01-01", "baseTypeName": "Autoscaling" }, "autoscaling-plans": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-01-06", "baseTypeName": "AutoscalingPlans" }, "batch": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-08-10", "baseTypeName": "Batch" }, "budgets": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-10-20", "baseTypeName": "Budgets" }, "ce": { - "version": "0.40.0", - "coreVersion": "0.40.0", + 
"version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-25", "baseTypeName": "CostExplorer" }, "chime": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-05-01", "baseTypeName": "Chime" }, "cloud9": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-23", "baseTypeName": "Cloud9" }, "clouddirectory": { - "version": "0.40.0", - "coreVersion": "0.40.0", - "protocolVersion": "2016-05-10", + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2017-01-11", "baseTypeName": "CloudDirectory" }, "cloudformation": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-05-15", "baseTypeName": "CloudFormation" }, "cloudfront": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-11-05", "baseTypeName": "CloudFront" }, "cloudhsm": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-05-30", "baseTypeName": "CloudHsm" }, "cloudhsmv2": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-04-28", "baseTypeName": "CloudHsmv2" }, "cloudsearch": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-01-01", "baseTypeName": "CloudSearch" }, "cloudsearchdomain": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-01-01", "baseTypeName": "CloudSearchDomain" }, "cloudtrail": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-11-01", "baseTypeName": "CloudTrail" }, "cloudwatch": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-08-01", "baseTypeName": "CloudWatch" }, "codebuild": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-10-06", "baseTypeName": "CodeBuild" }, "codecommit": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-04-13", "baseTypeName": "CodeCommit" }, "codedeploy": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-10-06", "baseTypeName": "CodeDeploy" }, "codepipeline": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-07-09", "baseTypeName": "CodePipeline" }, "codestar": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-04-19", "baseTypeName": "CodeStar" }, "cognito-identity": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-06-30", "baseTypeName": "CognitoIdentity" }, "cognito-idp": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-04-18", "baseTypeName": "CognitoIdentityProvider" }, "cognito-sync": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": 
"0.41.0", "protocolVersion": "2014-06-30", "baseTypeName": "CognitoSync" }, "comprehend": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-27", "baseTypeName": "Comprehend" }, "comprehendmedical": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-10-30", "baseTypeName": "ComprehendMedical" }, "config": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-11-12", "baseTypeName": "ConfigService" }, "connect": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-08-08", "baseTypeName": "Connect" }, "cur": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-01-06", "baseTypeName": "CostAndUsageReport" }, "datapipeline": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-10-29", "baseTypeName": "DataPipeline" }, "dax": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-04-19", "baseTypeName": "DynamodbAccelerator" }, "devicefarm": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-06-23", "baseTypeName": "DeviceFarm" }, "directconnect": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-10-25", "baseTypeName": "DirectConnect" }, "discovery": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-11-01", "baseTypeName": "Discovery" }, "dms": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-01-01", "baseTypeName": "DatabaseMigrationService" }, "docdb": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-10-31", "baseTypeName": "Docdb" }, "ds": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-04-16", "baseTypeName": "DirectoryService" }, "dynamodb": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-08-10", "baseTypeName": "DynamoDb" }, "dynamodbstreams": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-08-10", "baseTypeName": "DynamoDbStreams" }, "ec2": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-15", "baseTypeName": "Ec2" }, + "ec2-instance-connect": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2018-04-02", + "baseTypeName": "Ec2InstanceConnect" + }, "ecr": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-09-21", "baseTypeName": "Ecr" }, "ecs": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-11-13", "baseTypeName": "Ecs" }, "elasticache": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": 
"0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-02-02", "baseTypeName": "ElastiCache" }, "elasticbeanstalk": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-12-01", "baseTypeName": "ElasticBeanstalk" }, "efs": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-02-01", "baseTypeName": "Efs" }, "eks": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-01", "baseTypeName": "Eks" }, "elastictranscoder": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-09-25", "baseTypeName": "Ets" }, "elb": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-06-01", "baseTypeName": "Elb" }, "elbv2": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-12-01", "baseTypeName": "Elb" }, "emr": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2009-03-31", "baseTypeName": "Emr" }, "events": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-10-07", - "baseTypeName": "CloudWatchEvents" + "baseTypeName": "EventBridge" }, "firehose": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-08-04", "baseTypeName": "KinesisFirehose" }, "fms": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-01-01", "baseTypeName": "Fms" }, "fsx": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-03-01", "baseTypeName": "Fsx" }, "gamelift": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-10-01", "baseTypeName": "GameLift" }, "glacier": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-06-01", "baseTypeName": "Glacier" }, "glue": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-03-31", "baseTypeName": "Glue" }, "greengrass": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-06-07", "baseTypeName": "GreenGrass" }, "guardduty": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-28", "baseTypeName": "GuardDuty" }, "health": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-08-04", "baseTypeName": "AWSHealth" }, "iam": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-05-08", "baseTypeName": "Iam" }, "importexport": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-06-01", "baseTypeName": "ImportExport" }, "inspector": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + 
"coreVersion": "0.41.0", "protocolVersion": "2016-02-16", "baseTypeName": "Inspector" }, "iot": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-05-28", "baseTypeName": "Iot" }, "iotanalytics": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-27", "baseTypeName": "IotAnalytics" }, "iot-data": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-05-28", "baseTypeName": "IotData" }, "iot-jobs-data": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-29", "baseTypeName": "IotJobsData" }, "iot1click-devices": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-05-14", "baseTypeName": "Iot1ClickDevices" }, "iot1click-projects": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-05-14", "baseTypeName": "Iot1ClickProjects" }, "kafka": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-11-14", "baseTypeName": "Kafka" }, "kinesis": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-12-02", "baseTypeName": "Kinesis" }, "kinesisanalytics": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-08-14", "baseTypeName": "KinesisAnalytics" }, "kinesisvideo": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-30", "baseTypeName": "KinesisVideo" }, "kinesis-video-media": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-30", "baseTypeName": "KinesisVideoMedia" }, "kinesis-video-archived-media": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-30", "baseTypeName": "KinesisVideoArchivedMedia" }, "kms": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-11-01", "baseTypeName": "Kms" }, "lambda": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-03-31", "baseTypeName": "Lambda" }, "lex-models": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-04-19", "baseTypeName": "LexModels" }, "lex-runtime": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-28", "baseTypeName": "LexRuntime" }, "license-manager": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-08-01", "baseTypeName": "LicenseManager" }, "lightsail": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-28", "baseTypeName": "Lightsail" }, "logs": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-03-28", 
"baseTypeName": "CloudWatchLogs", "customDevDependencies": { @@ -561,290 +573,308 @@ } }, "machinelearning": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-12-12", "baseTypeName": "MachineLearning" }, "macie": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-12-19", "baseTypeName": "Macie" }, "marketplacecommerceanalytics": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-07-01", "baseTypeName": "MarketplaceCommerceAnalytics" }, "marketplace-entitlement": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-01-11", "baseTypeName": "MarketplaceEntitlement" }, "mediaconvert": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-08-29", "baseTypeName": "MediaConvert" }, "medialive": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-14", "baseTypeName": "MediaLive" }, "mediapackage": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-12", "baseTypeName": "MediaPackage" }, "mediastore": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-01", "baseTypeName": "MediaStore" }, "mediatailor": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-04-23", "baseTypeName": "MediaTailor" }, "meteringmarketplace": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-01-14", "baseTypeName": "MarketplaceMetering" }, "mgh": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-05-31", "baseTypeName": "MigrationHub" }, "mobile": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-07-01", "baseTypeName": "Mobile" }, "mq": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-27", "baseTypeName": "MQ" }, "mturk": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-01-17", "baseTypeName": "MechanicalTurk" }, "neptune": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-10-31", "baseTypeName": "Neptune" }, "opsworks": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-02-18", "baseTypeName": "OpsWorks" }, "opsworkscm": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-01", "baseTypeName": "OpsWorksCM" }, "organizations": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-28", "baseTypeName": "Organizations" }, "pi": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-02-27", 
"baseTypeName": "PerformanceInsights" }, "polly": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-06-10", "baseTypeName": "Polly" }, "pricing": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-15", "baseTypeName": "Pricing" }, + "qldb": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2019-01-02", + "baseTypeName": "Qldb" + }, + "qldb-session": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2019-07-11", + "baseTypeName": "QldbSession" + }, "ram": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-01-04", "baseTypeName": "Ram" }, "rds": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-10-31", "baseTypeName": "Rds" }, "rds-data": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-08-01", "baseTypeName": "RdsData" }, "redshift": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-12-01", "baseTypeName": "Redshift" }, "rekognition": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-06-27", "baseTypeName": "Rekognition" }, "resource-groups": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-11-27", "baseTypeName": "ResourceGroups" }, "resourcegroupstaggingapi": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-01-26", "baseTypeName": "ResourceGroupsTaggingApi" }, "route53": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-04-01", "baseTypeName": "Route53" }, "route53domains": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-05-15", "baseTypeName": "Route53Domains" }, "s3": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2006-03-01", "baseTypeName": "S3" }, "sagemaker": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-07-24", "baseTypeName": "SageMaker" }, "sagemaker-runtime": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-05-13", "baseTypeName": "SageMakerRuntime" }, "sdb": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2009-04-15", "baseTypeName": "SimpleDb" }, "secretsmanager": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-17", "baseTypeName": "SecretsManager" }, + "securityhub": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2018-10-26", + "baseTypeName": "SecurityHub" + }, "serverlessrepo": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-09-08", "baseTypeName": "ServerlessRepo" }, "servicecatalog": { - "version": 
"0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-12-10", "baseTypeName": "ServiceCatalog" }, "servicediscovery": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-03-14", "baseTypeName": "ServiceDiscovery" }, "ses": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-12-01", "baseTypeName": "Ses" }, "shield": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-06-02", "baseTypeName": "Shield" }, "sms": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-10-24", "baseTypeName": "ServerMigrationService" }, "snowball": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-06-30", "baseTypeName": "Snowball" }, "sns": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2010-03-31", "baseTypeName": "Sns" }, "sqs": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-11-05", "baseTypeName": "Sqs" }, "ssm": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2014-11-06", "baseTypeName": "Ssm" }, "stepfunctions": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-23", "baseTypeName": "StepFunctions" }, "storagegateway": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-06-30", "baseTypeName": "StorageGateway" }, "sts": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2011-06-15", "customDependencies": { "chrono": "0.4.0" @@ -852,68 +882,80 @@ "baseTypeName": "Sts" }, "support": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2013-04-15", "baseTypeName": "AWSSupport" }, "swf": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2012-01-25", "baseTypeName": "Swf" }, + "textract": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2018-06-27", + "baseTypeName": "Textract" + }, "transcribe": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-26", "baseTypeName": "Transcribe" }, + "transfer": { + "version": "0.41.0", + "coreVersion": "0.41.0", + "protocolVersion": "2018-11-05", + "baseTypeName": "Transfer" + }, "translate": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-07-01", "baseTypeName": "Translate" }, "waf": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-08-24", "baseTypeName": "Waf" }, "waf-regional": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-11-28", "baseTypeName": "WAFRegional" }, "workdocs": { - "version": "0.40.0", - "coreVersion": "0.40.0", + 
"version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-05-01", "baseTypeName": "Workdocs" }, "worklink": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2018-09-25", "baseTypeName": "Worklink" }, "workmail": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2017-10-01", "baseTypeName": "Workmail" }, "workspaces": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2015-04-08", "baseTypeName": "Workspaces" }, "xray": { - "version": "0.40.0", - "coreVersion": "0.40.0", + "version": "0.41.0", + "coreVersion": "0.41.0", "protocolVersion": "2016-04-12", "baseTypeName": "XRay" } diff --git a/service_crategen/src/botocore.rs b/service_crategen/src/botocore.rs index 98415cfa446..17f91cee1eb 100644 --- a/service_crategen/src/botocore.rs +++ b/service_crategen/src/botocore.rs @@ -13,7 +13,7 @@ use serde_json; use crate::util; -const BOTOCORE_DIR: &'static str = concat!(env!("CARGO_MANIFEST_DIR"), "/botocore/botocore/data/"); +const BOTOCORE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/botocore/botocore/data/"); #[derive(Debug, Deserialize)] pub struct ServiceDefinition { @@ -320,7 +320,7 @@ impl<'a> Operation { &self .input .as_ref() - .expect(&format!("Operation input undefined for {}", self.name)) + .unwrap_or_else(|| panic!("Operation input undefined for {}", self.name)) .shape } diff --git a/service_crategen/src/cargo.rs b/service_crategen/src/cargo.rs index 914439fdb72..c8f811271dc 100644 --- a/service_crategen/src/cargo.rs +++ b/service_crategen/src/cargo.rs @@ -3,7 +3,7 @@ use toml; use serde_derive::{Deserialize, Serialize}; use std::collections::BTreeMap; -#[derive(Debug, Default, Clone, Deserialize, Serialize)] +#[derive(Debug, Default, Deserialize, Serialize)] pub struct Manifest { pub package: Metadata, pub badges: Option>, @@ -18,7 +18,7 @@ pub struct Manifest { pub features: Option>>, } -#[derive(Debug, Default, Clone, Deserialize, Serialize)] +#[derive(Debug, Default, Deserialize, Serialize)] pub struct Metadata { pub authors: Option>, pub description: Option, @@ -34,7 +34,7 @@ pub struct Metadata { pub exclude: Option>, } -#[derive(Debug, Clone, Deserialize, Serialize)] +#[derive(Debug, Deserialize, Serialize)] pub struct Badge { pub repository: String, pub branch: String, diff --git a/service_crategen/src/commands/generate/codegen/error_types.rs b/service_crategen/src/commands/generate/codegen/error_types.rs index bbc1467d801..ba3670ab122 100644 --- a/service_crategen/src/commands/generate/codegen/error_types.rs +++ b/service_crategen/src/commands/generate/codegen/error_types.rs @@ -227,7 +227,7 @@ impl XmlErrorTypes { } } - type_matchers.push(format!("_ => {{}}")); + type_matchers.push("_ => {}".to_string()); type_matchers.join(",") } } @@ -279,10 +279,8 @@ impl JsonErrorTypes { } } } - type_matchers.push(format!( - "\"ValidationException\" => return RusotoError::Validation(err.msg)", - )); - type_matchers.push(format!("_ => {{}}")); + type_matchers.push("\"ValidationException\" => return RusotoError::Validation(err.msg)".to_string()); + type_matchers.push("_ => {}".to_string()); type_matchers.join(",\n") } } diff --git a/service_crategen/src/commands/generate/codegen/mod.rs b/service_crategen/src/commands/generate/codegen/mod.rs index 76b644a06de..545a79d51ec 100644 --- a/service_crategen/src/commands/generate/codegen/mod.rs +++ 
b/service_crategen/src/commands/generate/codegen/mod.rs @@ -34,7 +34,11 @@ pub trait GenerateProtocol { /// Generate the various `use` statements required by the module generatedfor this service fn generate_prelude(&self, writer: &mut FileWriter, service: &Service<'_>) -> IoResult; - fn generate_method_signatures(&self, writer: &mut FileWriter, service: &Service<'_>) -> IoResult; + fn generate_method_signatures( + &self, + writer: &mut FileWriter, + service: &Service<'_>, + ) -> IoResult; /// Generate a method for each `Operation` in the `Service` to execute that method remotely /// @@ -94,7 +98,7 @@ pub fn generate_source(service: &Service<'_>, writer: &mut FileWriter) -> IoResu /// escape reserved words with an underscore pub fn generate_field_name(member_name: &str) -> String { let name = member_name.to_snake_case(); - if name == "return" || name == "type" { + if name == "return" || name == "type" || name == "match" { name + "_" } else { name @@ -127,11 +131,10 @@ where // must be updated to generate the changes. // // ================================================================= + #![allow(warnings)] use std::error::Error; use std::fmt; - - #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::request::{{BufferedHttpResponse, DispatchSignedRequest}}; @@ -184,10 +187,7 @@ where /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> {type_name} {{ - {type_name} {{ - client: Client::shared(), - region - }} + Self::new_with_client(Client::shared(), region) }} pub fn new_with(request_dispatcher: D, credentials_provider: P, region: region::Region) -> {type_name} @@ -195,9 +195,14 @@ where P::Future: Send, D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send + {{ + Self::new_with_client(Client::new_with(credentials_provider, request_dispatcher), region) + }} + + pub fn new_with_client(client: Client, region: region::Region) -> {type_name} {{ {type_name} {{ - client: Client::new_with(credentials_provider, request_dispatcher), + client, region }} }} @@ -280,7 +285,7 @@ fn is_streaming_shape(service: &Service<'_>, name: &str) -> bool { .any(|(_, shape)| streaming_members(shape).any(|member| member.shape == name)) } -// do any type name mutation needed to avoid collisions with Rust types +// do any type name mutation for shapes needed to avoid collisions with Rust types and Error enum types fn mutate_type_name(service: &Service<'_>, type_name: &str) -> String { let capitalized = util::capitalize_first(type_name.to_owned()); @@ -306,6 +311,12 @@ fn mutate_type_name(service: &Service<'_>, type_name: &str) -> String { // EC2 has an CreateFleetError struct, avoid collision with our error enum "CreateFleetError" => "EC2CreateFleetError".to_owned(), + // codecommit has a BatchDescribeMergeConflictsError, avoid collision with our error enum + "BatchDescribeMergeConflictsError" => "CodeCommitBatchDescribeMergeConflictsError".to_owned(), + + // codecommit has a BatchGetCommitsError, avoid collision with our error enum + "BatchGetCommitsError" => "CodeCommitBatchGetCommitsError".to_owned(), + // otherwise make sure it's rust-idiomatic and capitalized _ => without_underscores, } @@ -318,10 +329,12 @@ pub fn mutate_type_name_for_streaming(type_name: &str) -> String { fn find_shapes_to_generate(service: &Service<'_>) -> BTreeSet { let mut shapes_to_generate = BTreeSet::::new(); + let mut visitor = |shape_name: &str, _shape: &Shape| { shapes_to_generate.insert(shape_name.to_owned()) }; - for (_, operation) in 
service.operations() { + + for operation in service.operations().values() { if let Some(ref input) = operation.input { service.visit_shapes(&input.shape, &mut visitor); } @@ -334,10 +347,14 @@ fn find_shapes_to_generate(service: &Service<'_>) -> BTreeSet { } } } - return shapes_to_generate; -} + shapes_to_generate +} -fn generate_types

(writer: &mut FileWriter, service: &Service<'_>, protocol_generator: &P) -> IoResult +fn generate_types

( + writer: &mut FileWriter, + service: &Service<'_>, + protocol_generator: &P, +) -> IoResult where P: GenerateProtocol, { @@ -447,7 +464,7 @@ where let test_attributes = if derived.iter().any(|&x| x == "Deserialize") && !derived.iter().any(|&x| x == "Serialize") { - "\n#[cfg_attr(test, derive(Serialize))]" + "\n#[cfg_attr(any(test, feature = \"serialize_structs\"), derive(Serialize))]" } else { "" }; @@ -556,6 +573,10 @@ fn generate_struct_fields( // See https://github.com/rusoto/rusoto/issues/1419 for more information if service.name() == "CodePipeline" && shape_name == "ActionRevision" && name == "revision_change_id" || name == "created" { lines.push(format!("pub {}: Option<{}>,", name, rs_type)) + // In pratice, Lex can return null values for slots that are not filled. The documentation + // does not mention that the slot values themselves can be null. + } else if service.name() == "Amazon Lex Runtime Service" && shape_name == "PostTextResponse" && name == "slots"{ + lines.push(format!("pub {}: Option<::std::collections::HashMap>>,", name)) } else if shape.required(member_name) { lines.push(format!("pub {}: {},", name, rs_type)) } else if name == "type" { diff --git a/service_crategen/src/commands/generate/codegen/query.rs b/service_crategen/src/commands/generate/codegen/query.rs index 48265f3a5bb..ac1238a9876 100644 --- a/service_crategen/src/commands/generate/codegen/query.rs +++ b/service_crategen/src/commands/generate/codegen/query.rs @@ -187,7 +187,7 @@ fn generate_list_serializer(service: &Service<'_>, shape: &Shape) -> String { if primitive { parts.push(format!( "params.put(&key, {});", - serialize_primitive_expression(&member_shape.shape_type, "obj") + serialize_primitive_expression(member_shape.shape_type, "obj") )); } else { parts.push(format!( @@ -217,16 +217,15 @@ fn list_member_format(service: &Service<'_>, flattened: bool) -> String { fn generate_map_serializer(service: &Service<'_>, shape: &Shape) -> String { let mut parts = Vec::new(); - let prefix_snip: String; - if service.service_id() == Some("SNS") + let prefix_snip = if service.service_id() == Some("SNS") && shape.value.is_some() && (shape.value.as_ref().unwrap().shape == "MessageAttributeValue" || shape.value.as_ref().unwrap().shape == "AttributeValue") { - prefix_snip = "let prefix = format!(\"{}.entry.{}\", name, index+1);".to_string(); + "let prefix = format!(\"{}.entry.{}\", name, index+1);".to_string() } else { - prefix_snip = "let prefix = format!(\"{}.{}\", name, index+1);".to_string(); - } + "let prefix = format!(\"{}.{}\", name, index+1);".to_string() + }; // the key is always a string type parts.push(format!( @@ -243,9 +242,9 @@ fn generate_map_serializer(service: &Service<'_>, shape: &Shape) -> String { let primitive_value = value_shape.is_primitive(); if primitive_value { - parts.push(format!( - "params.put(&format!(\"{{}}.{{}}\", prefix, \"Value\"), &value);" - )); + parts.push( + "params.put(&format!(\"{}.{}\", prefix, \"Value\"), &value);".to_string() + ); } else { parts.push(format!( "{value_type}Serializer::serialize( @@ -357,7 +356,7 @@ fn optional_primitive_field_serializer( return "".to_owned(); } let member_shape = service.shape_for_member(member).unwrap(); - let expression = serialize_primitive_expression(&member_shape.shape_type, "field_value"); + let expression = serialize_primitive_expression(member_shape.shape_type, "field_value"); format!( "if let Some(ref field_value) = obj.{field_name} {{ @@ -376,7 +375,7 @@ fn required_primitive_field_serializer( ) -> String { let member_shape = 
diff --git a/service_crategen/src/commands/generate/codegen/query.rs b/service_crategen/src/commands/generate/codegen/query.rs
index 48265f3a5bb..ac1238a9876 100644
--- a/service_crategen/src/commands/generate/codegen/query.rs
+++ b/service_crategen/src/commands/generate/codegen/query.rs
@@ -187,7 +187,7 @@ fn generate_list_serializer(service: &Service<'_>, shape: &Shape) -> String {
     if primitive {
         parts.push(format!(
             "params.put(&key, {});",
-            serialize_primitive_expression(&member_shape.shape_type, "obj")
+            serialize_primitive_expression(member_shape.shape_type, "obj")
         ));
     } else {
         parts.push(format!(
@@ -217,16 +217,15 @@ fn list_member_format(service: &Service<'_>, flattened: bool) -> String {
 fn generate_map_serializer(service: &Service<'_>, shape: &Shape) -> String {
     let mut parts = Vec::new();
-    let prefix_snip: String;
-    if service.service_id() == Some("SNS")
+    let prefix_snip = if service.service_id() == Some("SNS")
         && shape.value.is_some()
         && (shape.value.as_ref().unwrap().shape == "MessageAttributeValue"
             || shape.value.as_ref().unwrap().shape == "AttributeValue")
     {
-        prefix_snip = "let prefix = format!(\"{}.entry.{}\", name, index+1);".to_string();
+        "let prefix = format!(\"{}.entry.{}\", name, index+1);".to_string()
     } else {
-        prefix_snip = "let prefix = format!(\"{}.{}\", name, index+1);".to_string();
-    }
+        "let prefix = format!(\"{}.{}\", name, index+1);".to_string()
+    };
 
     // the key is always a string type
     parts.push(format!(
@@ -243,9 +242,9 @@ fn generate_map_serializer(service: &Service<'_>, shape: &Shape) -> String {
     let primitive_value = value_shape.is_primitive();
 
     if primitive_value {
-        parts.push(format!(
-            "params.put(&format!(\"{{}}.{{}}\", prefix, \"Value\"), &value);"
-        ));
+        parts.push(
+            "params.put(&format!(\"{}.{}\", prefix, \"Value\"), &value);".to_string()
+        );
     } else {
         parts.push(format!(
             "{value_type}Serializer::serialize(
@@ -357,7 +356,7 @@ fn optional_primitive_field_serializer(
         return "".to_owned();
     }
     let member_shape = service.shape_for_member(member).unwrap();
-    let expression = serialize_primitive_expression(&member_shape.shape_type, "field_value");
+    let expression = serialize_primitive_expression(member_shape.shape_type, "field_value");
 
     format!(
         "if let Some(ref field_value) = obj.{field_name} {{
@@ -376,7 +375,7 @@ fn required_primitive_field_serializer(
 ) -> String {
     let member_shape = service.shape_for_member(member).unwrap();
     let expression = serialize_primitive_expression(
-        &member_shape.shape_type,
+        member_shape.shape_type,
         &format!("obj.{}", generate_field_name(member_name)),
     );
@@ -387,8 +386,8 @@ fn required_primitive_field_serializer(
     )
 }
 
-fn serialize_primitive_expression(shape_type: &ShapeType, var_name: &str) -> String {
-    match *shape_type {
+fn serialize_primitive_expression(shape_type: ShapeType, var_name: &str) -> String {
+    match shape_type {
         ShapeType::String
         | ShapeType::Timestamp
         | ShapeType::Integer
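The repeated `serialize_primitive_expression` edits in this file drop the `&` because the argument can be taken by value. A minimal standalone illustration, assuming (as the refactor implies) that `ShapeType` is a small `Copy` enum; the match arms here are invented, not the crategen originals:

```rust
#[derive(Clone, Copy)]
enum ShapeType {
    String,
    Integer,
    Boolean,
}

// Taking a Copy type by value lets callers write
// `serialize_primitive_expression(member_shape.shape_type, "obj")` without
// borrowing, and the body can match on `shape_type` with no dereference.
fn serialize_primitive_expression(shape_type: ShapeType, var_name: &str) -> String {
    match shape_type {
        ShapeType::String => var_name.to_owned(),
        ShapeType::Integer | ShapeType::Boolean => format!("&{}.to_string()", var_name),
    }
}

fn main() {
    let shape_type = ShapeType::Integer; // stand-in for member_shape.shape_type
    assert_eq!(serialize_primitive_expression(shape_type, "obj"), "&obj.to_string()");
}
```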
diff --git a/service_crategen/src/commands/generate/codegen/rest_json.rs b/service_crategen/src/commands/generate/codegen/rest_json.rs
index c195e29a8ab..c81dd775e1a 100644
--- a/service_crategen/src/commands/generate/codegen/rest_json.rs
+++ b/service_crategen/src/commands/generate/codegen/rest_json.rs
@@ -31,7 +31,7 @@ impl GenerateProtocol for RestJsonGenerator {
                 RusotoFuture<{output_type}, {error_type}>;
                 ",
                 documentation = generate_documentation(operation).unwrap_or_else(|| "".to_owned()),
-                method_signature = generate_method_signature(operation, input_shape),
+                method_signature = generate_method_signature(operation, *input_shape),
                 error_type = error_type_name(service, operation_name),
                 output_type = output_type
             )?
@@ -80,7 +80,7 @@ impl GenerateProtocol for RestJsonGenerator {
                 }}
                 ",
                 documentation = generate_documentation(operation).unwrap_or_else(|| "".to_owned()),
-                method_signature = generate_method_signature(operation, input_shape),
+                method_signature = generate_method_signature(operation, *input_shape),
                 endpoint_prefix = service.signing_name(),
                 modify_endpoint_prefix = generate_endpoint_modification(service).unwrap_or_else(|| "".to_owned()),
                 http_method = operation.http.method,
@@ -97,7 +97,7 @@ impl GenerateProtocol for RestJsonGenerator {
                     service,
                     operation
                 ).unwrap_or_else(|| "".to_string()),
-                load_payload = generate_payload(service, input_shape).unwrap_or_else(|| "".to_string()),
+                load_payload = generate_payload(service, *input_shape).unwrap_or_else(|| "".to_string()),
                 load_params = rest_request_generator::generate_params_loading_string(service, operation).unwrap_or_else(|| "".to_string()),
                 default_headers = generate_default_headers(service),
                 set_headers = generate_headers(service).unwrap_or_else(|| "".to_string()),
@@ -182,7 +182,7 @@ fn generate_endpoint_modification(service: &Service<'_>) -> Option<String> {
 
 // IoT defines a lot of empty (and therefore unnecessary) request shapes
 // don't clutter method signatures with them
-fn generate_method_signature(operation: &Operation, shape: &Option<&Shape>) -> String {
+fn generate_method_signature(operation: &Operation, shape: Option<&Shape>) -> String {
     match shape {
         Some(s) => match s.members {
             Some(ref members) if !members.is_empty() => format!(
@@ -203,7 +203,7 @@ fn generate_method_signature(operation: &Operation, shape: &Option<&Shape>) -> S
 }
 
 // Figure out what, if anything, should be sent as the body of the http request
-fn generate_payload(service: &Service<'_>, input_shape: &Option<&Shape>) -> Option<String> {
+fn generate_payload(service: &Service<'_>, input_shape: Option<&Shape>) -> Option<String> {
     let i = input_shape.as_ref()?;
     let declare_payload = match i.payload {
         // if the input shape explicitly specifies a payload field, use that
@@ -226,10 +226,9 @@ fn generate_payload(service: &Service<'_>, input_shape: &Option<&Shape>) -> Opti
         }
     };
 
-    if declare_payload.is_some() {
-        Some(declare_payload.unwrap() + "request.set_payload(encoded);")
-    } else {
-        None
+    match declare_payload {
+        Some(value) => Some(value + "request.set_payload(encoded);"),
+        _ => None
     }
 }
 
@@ -355,9 +354,7 @@ fn generate_body_parser(operation: &Operation, service: &Service<'_>) -> String
     // is the shape required?
     let payload_shape_required = match output_shape.required {
         Some(ref s) => {
-            // if there's any required shape present the body payload parser will handle it
-            // This can't be converted to `s.is_empty()`. TODO: find out why.
-            s.len() > 0
+            !s.is_empty()
         }
         None => false,
     };
@@ -388,15 +385,16 @@ fn payload_body_parser(
     mutable_result: bool,
     payload_required: bool,
 ) -> String {
-    let response_body = match payload_required {
-        true => match payload_type {
+    let response_body = if payload_required {
+        match payload_type {
             ShapeType::Blob => "response.body",
             _ => "String::from_utf8_lossy(response.body.as_ref())",
-        },
-        false => match payload_type {
+        }
+    } else {
+        match payload_type {
             ShapeType::Blob => "Some(response.body)",
             _ => "Some(String::from_utf8_lossy(response.body.as_ref()).into_owned())",
-        },
+        }
     };
 
     format!(
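The control-flow cleanups in this file lean on two `Option` idioms: `?` to early-return `None` from a function that itself returns `Option`, and pattern matching instead of the `is_some()`/`unwrap()` pair. A simplified stand-in for `generate_payload` (the shape handling and the generated snippet are invented; only the control flow mirrors the change):

```rust
// `input_shape?;` early-returns None, replacing the old
// `if input_shape.is_none() { return None; }` block.
fn generate_payload(input_shape: Option<&str>) -> Option<String> {
    let i = input_shape?;
    let declare_payload = if i.is_empty() {
        None
    } else {
        Some(format!("let encoded = Some(serde_json::to_vec(&input).unwrap()); // {}\n", i))
    };
    // Same result as `declare_payload.map(|v| v + "request.set_payload(encoded);")`.
    match declare_payload {
        Some(value) => Some(value + "request.set_payload(encoded);"),
        _ => None,
    }
}

fn main() {
    assert_eq!(generate_payload(None), None);
    let payload = generate_payload(Some("PutItemInput")).unwrap();
    assert!(payload.ends_with("request.set_payload(encoded);"));
}
```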
{ "header" => { if shape.required(member_name) { @@ -74,9 +70,7 @@ pub fn generate_headers(service: &Service<'_>, operation: &Operation) -> Option< } pub fn generate_params_loading_string(service: &Service<'_>, operation: &Operation) -> Option { - if operation.input.is_none() { - return None; - } + operation.input.as_ref()?; let input_type = operation.input_shape(); let input_shape = service.get_shape(input_type).unwrap(); @@ -172,16 +166,18 @@ fn generate_static_param_strings(operation: &Operation) -> Vec { // Sometimes the key has the query param already set, like "list-type=2" // These need to get parsed out of the URI and added to the params map // with the other "queryparam" members - if let Some(ref key) = maybe_params { - match key.find('=') { - Some(_) => { - let key_val_vec: Vec<&str> = key.split('=').collect(); - static_params.push(format!( - "params.put(\"{}\", \"{}\");", - key_val_vec[0], key_val_vec[1] - )) + if let Some(ref params) = maybe_params { + for param in params.split('&') { + match param.find('=') { + Some(_) => { + let key_val_vec: Vec<&str> = param.split('=').collect(); + static_params.push(format!( + "params.put(\"{}\", \"{}\");", + key_val_vec[0], key_val_vec[1] + )) + } + None => static_params.push(format!("params.put_key(\"{}\");", param)), } - None => static_params.push(format!("params.put_key(\"{}\");", key)), } }; @@ -198,10 +194,8 @@ fn generate_snake_case_uri(request_uri: &str) -> String { // convert fooBar to foo_bar for caps in URI_ARGS_SNAKE_REGEX.captures_iter(request_uri) { let to_find = caps.get(0).expect("nothing captured").as_str(); - // this silliness is because {fooBar} gets converted to {foo_bar_} and sometimes {_foo_bar}. - let replacement = Inflector::to_snake_case(caps.get(0).unwrap().as_str()) - .replace("_}", "}") - .replace("{_", "{"); + // Wrap with curly braces again: + let replacement = format!("{{{}}}", Inflector::to_snake_case(caps.get(0).unwrap().as_str())); snake = snake.replace(to_find, &replacement); } @@ -346,4 +340,21 @@ mod tests { ); } + #[test] + fn generate_static_param_strings_parses_mixed_params() { + let service_json = "{\ + \"name\": \"Search\", + \"http\":{ + \"method\":\"GET\", + \"requestUri\":\"/2013-01-01/suggest?format=sdk&pretty\" + }\ + }"; + let operation = serde_json::from_str(service_json).expect("failed to parse operation json"); + + let static_params = generate_static_param_strings(&operation); + + assert_eq!(static_params.len(), 2); + assert_eq!(static_params.get(0), Some(&"params.put(\"format\", \"sdk\");".to_owned())); + assert_eq!(static_params.get(1), Some(&"params.put_key(\"pretty\");".to_owned())); + } } diff --git a/service_crategen/src/commands/generate/codegen/rest_response_parser.rs b/service_crategen/src/commands/generate/codegen/rest_response_parser.rs index 9746eeb18e0..ee31c19a4ea 100644 --- a/service_crategen/src/commands/generate/codegen/rest_response_parser.rs +++ b/service_crategen/src/commands/generate/codegen/rest_response_parser.rs @@ -12,9 +12,7 @@ pub fn generate_response_headers_parser( operation: &Operation, ) -> Option { // nothing to do if there's no output type - if operation.output.is_none() { - return None; - } + operation.output.as_ref()?; let shape = service .get_shape(&operation.output.as_ref().unwrap().shape) diff --git a/service_crategen/src/commands/generate/codegen/rest_xml.rs b/service_crategen/src/commands/generate/codegen/rest_xml.rs index 27a71faa5c0..095aa216a29 100644 --- a/service_crategen/src/commands/generate/codegen/rest_xml.rs +++ 
b/service_crategen/src/commands/generate/codegen/rest_xml.rs
@@ -155,14 +155,10 @@ fn generate_documentation(operation: &Operation, service: &Service<'_>) -> Strin
     };
 
     // Specialized docs for services:
-    match service.name().to_ascii_lowercase().as_ref() {
-        "route 53" => {
+    if let "route 53" = service.name().to_ascii_lowercase().as_ref() {
         if operation.name == "ChangeResourceRecordSets" {
             docs = format!("/// For TXT records, see util::quote_txt_record\n{}", docs);
         }
-            ()
-        }
-        _ => (),
     }
 
     docs
@@ -170,9 +166,7 @@ fn generate_documentation(operation: &Operation, service: &Service<'_>) -> Strin
 
 fn generate_payload_serialization(service: &Service<'_>, operation: &Operation) -> Option<String> {
     // nothing to do if there's no input type
-    if operation.input.is_none() {
-        return None;
-    }
+    operation.input.as_ref()?;
 
     let input = operation.input.as_ref().unwrap();
     let input_shape = service.get_shape(&input.shape).unwrap();
diff --git a/service_crategen/src/commands/generate/codegen/tests.rs b/service_crategen/src/commands/generate/codegen/tests.rs
index 58365b5e6ce..4cfd851b029 100644
--- a/service_crategen/src/commands/generate/codegen/tests.rs
+++ b/service_crategen/src/commands/generate/codegen/tests.rs
@@ -8,11 +8,11 @@ use super::{FileWriter, IoResult};
 use crate::util;
 use crate::Service;
 
-const BOTOCORE_ERROR_RESPONSE_TESTS_DIR: &'static str = concat!(
+const BOTOCORE_ERROR_RESPONSE_TESTS_DIR: &str = concat!(
     env!("CARGO_MANIFEST_DIR"),
     "/botocore/tests/unit/response_parsing/xml/errors/"
 );
 
-const BOTOCORE_VALID_RESPONSE_TESTS_DIR: &'static str = concat!(
+const BOTOCORE_VALID_RESPONSE_TESTS_DIR: &str = concat!(
     env!("CARGO_MANIFEST_DIR"),
     "/botocore/tests/unit/response_parsing/xml/responses/"
 );
@@ -99,9 +99,7 @@ fn generate_response_parse_test(
 ) -> Option<String> {
     let maybe_operation = case_insensitive_btreemap_get(service.operations(), &response.action);
 
-    if maybe_operation.is_none() {
-        return None;
-    }
+    maybe_operation?;
 
     let operation = maybe_operation.unwrap();
     let request_params;
@@ -165,13 +163,13 @@ impl Response {
             );
 
             service_name.and_then(|s| {
-                action.and_then(|a| {
-                    Some(Response {
+                action.map(|a| {
+                    Response {
                         service: s,
                         action: a,
                         file_name: file_name.to_string_lossy().into_owned(),
                         full_path: path.to_owned(),
-                    })
+                    }
                 })
             })
         } else {
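The `impl Response` change in `tests.rs` is the standard clippy rewrite: `and_then(|x| Some(y))` is exactly `map(|x| y)`. A toy version with stand-in types (the real `Response` also carries file paths):

```rust
struct Response {
    service: String,
    action: String,
}

// `and_then` is for closures that already return an Option; when the closure
// always wraps its result in `Some`, `map` says the same thing with less noise.
fn build(service_name: Option<String>, action: Option<String>) -> Option<Response> {
    service_name.and_then(|s| {
        action.map(|a| Response {
            service: s,
            action: a,
        })
    })
}

fn main() {
    let r = build(Some("s3".into()), Some("GetObject".into())).unwrap();
    assert_eq!(r.service, "s3");
    assert_eq!(r.action, "GetObject");
}
```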
diff --git a/service_crategen/src/commands/generate/codegen/xml_payload_parser.rs b/service_crategen/src/commands/generate/codegen/xml_payload_parser.rs
index 763af4f9ab8..2b758d800e4 100644
--- a/service_crategen/src/commands/generate/codegen/xml_payload_parser.rs
+++ b/service_crategen/src/commands/generate/codegen/xml_payload_parser.rs
@@ -187,7 +187,7 @@ fn xml_body_parser(
         }} else {{
             let reader = EventReader::new_with_config(
                 response.body.as_ref(),
-                ParserConfig::new().trim_whitespace(true)
+                ParserConfig::new().trim_whitespace(false)
             );
             let mut stack = XmlResponse::new(reader.into_iter().peekable());
             let _start_document = stack.next();
@@ -205,16 +205,15 @@ fn xml_body_parser(
 }
 
 fn generate_deserializer_body(name: &str, shape: &Shape, service: &Service<'_>) -> String {
-    match (service.endpoint_prefix(), name) {
-        ("s3", "GetBucketLocationOutput") => {
+    if let ("s3", "GetBucketLocationOutput") = (service.endpoint_prefix(), name) {
         // override custom deserializer
         let struct_field_deserializers = shape
             .members
             .as_ref()
             .unwrap()
             .iter()
-            .filter_map(|(member_name, member)| {
-                Some(format!(
+            .map(|(member_name, member)| {
+                format!(
                     "obj.{field_name} = {parse_expression};",
                     field_name = generate_field_name(member_name),
                     parse_expression = generate_struct_field_parse_expression(
@@ -225,7 +224,7 @@ fn generate_deserializer_body(name: &str, shape: &Shape, service: &Service<'_>)
                         &member_name.to_string(),
                         false
                     )
-                ))
+                )
             })
             .collect::<Vec<String>>()
             .join("\n");
@@ -236,8 +235,6 @@ fn generate_deserializer_body(name: &str, shape: &Shape, service: &Service<'_>)
             name = name,
             struct_field_deserializers = struct_field_deserializers
         );
-        }
-        _ => {}
     }
 
     match shape.shape_type {
         ShapeType::List => generate_list_deserializer(shape, service),
diff --git a/service_crategen/src/commands/generate/mod.rs b/service_crategen/src/commands/generate/mod.rs
index 1d670b9e958..eaa994a5bca 100644
--- a/service_crategen/src/commands/generate/mod.rs
+++ b/service_crategen/src/commands/generate/mod.rs
@@ -57,7 +57,7 @@ fn generate_examples(crate_dir_path: &Path) -> Option<String> {
 pub fn generate_services(
     services: &BTreeMap<String, ServiceConfig>,
     out_dir: &Path,
-    service_to_generate: &Option<&Vec<&str>>,
+    service_to_generate: Option<&Vec<&str>>,
 ) {
     if !out_dir.exists() {
         fs::create_dir(out_dir).expect("Unable to create output directory");
@@ -86,6 +86,7 @@ pub fn generate_services(
     features.insert("default".into(), vec!["native-tls".into()]);
     features.insert("native-tls".into(), vec!["rusoto_core/native-tls".into()]);
     features.insert("rustls".into(), vec!["rusoto_core/rustls".into()]);
+    features.insert("serialize_structs".into(), vec![]);
 
     let service_dependencies = service.get_dependencies();
     let service_dev_dependencies = service.get_dev_dependencies();
@@ -185,6 +186,13 @@ To use `{crate_name}` in your application, add it as a dependency in your `Cargo
 {crate_name} = "{version}"
 ```
 
+## Crate Features
+- `native-tls` - use platform-specific TLS implementation.
+- `rustls` - use rustls TLS implementation.
+- `serialize_structs` - output structs of most operations get `derive(Serialize)`.
+
+Note: the crate will use the `native-tls` TLS implementation by default.
+
 ## Contributing
 
 See [CONTRIBUTING][contributing].
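For downstream users the new flag is opt-in per service crate. A sketch of what enabling it buys, assuming a dependency such as `rusoto_s3 = { version = "0.41", features = ["serialize_structs"] }` plus `serde_json`; `ListBucketsOutput` is just one example of an affected output struct:

```rust
use rusoto_s3::ListBucketsOutput;

fn main() {
    // Generated output structs implement Default, so an empty value is easy to
    // build; with `serialize_structs` enabled they also derive Serialize, which
    // makes them usable for logging, snapshots, or test fixtures.
    let output = ListBucketsOutput::default();
    let json = serde_json::to_string(&output).expect("Serialize is derived via serialize_structs");
    println!("{}", json);
}
```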
diff --git a/service_crategen/src/config.rs b/service_crategen/src/config.rs
index 7cf1e0baa06..7c4f0eed271 100644
--- a/service_crategen/src/config.rs
+++ b/service_crategen/src/config.rs
@@ -10,7 +10,7 @@ use serde_json;
 
 use crate::cargo;
 
-#[derive(Clone, Debug, Deserialize)]
+#[derive(Debug, Deserialize)]
 pub struct ServiceConfig {
     pub version: String,
     #[serde(rename = "coreVersion")]
diff --git a/service_crategen/src/main.rs b/service_crategen/src/main.rs
index ae478d9befc..35fcb36dbef 100644
--- a/service_crategen/src/main.rs
+++ b/service_crategen/src/main.rs
@@ -73,6 +73,6 @@ fn main() {
         .values_of("service")
         .map(std::iter::Iterator::collect);
 
-    commands::generate::generate_services(&service_configs, out_dir, &service.as_ref());
+    commands::generate::generate_services(&service_configs, out_dir, service.as_ref());
     }
 }
diff --git a/service_crategen/src/service.rs b/service_crategen/src/service.rs
index 443e134dccf..0f0e7c99b70 100644
--- a/service_crategen/src/service.rs
+++ b/service_crategen/src/service.rs
@@ -151,12 +151,12 @@ impl<'b> Service<'b> {
             "query" | "ec2" => {
                 dependencies.insert(
                     "serde_urlencoded".to_owned(),
-                    cargo::Dependency::Simple("0.5".into()),
+                    cargo::Dependency::Simple("0.6".into()),
                 );
-                dependencies.insert("xml-rs".to_owned(), cargo::Dependency::Simple("0.7".into()));
+                dependencies.insert("xml-rs".to_owned(), cargo::Dependency::Simple("0.8".into()));
             }
             "rest-xml" => {
-                dependencies.insert("xml-rs".to_owned(), cargo::Dependency::Simple("0.7".into()));
+                dependencies.insert("xml-rs".to_owned(), cargo::Dependency::Simple("0.8".into()));
             }
             "rest-json" => {
                 dependencies.insert(
@@ -192,9 +192,9 @@ impl<'b> Service<'b> {
             "rusoto_mock".to_owned(),
             cargo::Dependency::Extended {
                 path: Some("../../../mock".into()),
-                version: Some("0.40.0".into()),
+                version: Some("0.41.0".into()),
                 optional: None,
-                default_features: None,
+                default_features: Some(false),
                 features: None,
             },
         );
diff --git a/skeptical/Cargo.toml b/skeptical/Cargo.toml
index 079b9e90828..ea0241842bf 100644
--- a/skeptical/Cargo.toml
+++ b/skeptical/Cargo.toml
@@ -3,13 +3,14 @@ name = "skeptical"
 version = "0.1.0"
 authors = ["Matthew Mayer "]
 build = "build.rs"
+edition = "2018"
 
 [dependencies]
-rusoto_core = "0.40"
-rusoto_dynamodb = "0.40"
-rusoto_s3 = "0.40"
-rusoto_ec2 = "0.40"
-rusoto_sts = "0.40"
+rusoto_core = "0.41"
+rusoto_dynamodb = "0.41"
+rusoto_s3 = "0.41"
+rusoto_ec2 = "0.41"
+rusoto_sts = "0.41"
 env_logger = "0.5"
 
 [build-dependencies]
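The `service.rs` hunk relies on the `cargo::Dependency` metadata types. A rough model (not the actual `cargo` module; fields are abbreviated to the two that matter here) of how `default_features: Some(false)` renders as `default-features = false` in a generated `Cargo.toml` entry:

```rust
// Simplified stand-in for the crategen cargo metadata types.
enum Dependency {
    Simple(String),
    Extended {
        version: Option<String>,
        default_features: Option<bool>,
    },
}

// Render a dependency the way it would appear on the right-hand side of a
// `name = ...` line in a generated Cargo.toml.
fn render(dep: &Dependency) -> String {
    match dep {
        Dependency::Simple(v) => format!("\"{}\"", v),
        Dependency::Extended { version, default_features } => {
            let mut parts = Vec::new();
            if let Some(v) = version {
                parts.push(format!("version = \"{}\"", v));
            }
            if let Some(df) = default_features {
                parts.push(format!("default-features = {}", df));
            }
            format!("{{ {} }}", parts.join(", "))
        }
    }
}

fn main() {
    let rusoto_mock = Dependency::Extended {
        version: Some("0.41.0".into()),
        default_features: Some(false),
    };
    assert_eq!(render(&rusoto_mock), "{ version = \"0.41.0\", default-features = false }");
}
```

Disabling default features on the `rusoto_mock` dev-dependency likely exists so that service crates built with `rustls` are not forced onto mock's default TLS backend; that rationale is inferred, not stated in the diff.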